From c527fd1f14c27855a37f2e8ac5346ce8d940ced2 Mon Sep 17 00:00:00 2001
From: Tudor Florea
Date: Thu, 16 Oct 2014 03:05:19 +0200
Subject: initial commit for Enea Linux 4.0-140929

Migrated from the internal git server on the daisy-enea-point-release branch

Signed-off-by: Tudor Florea
---
 meta/lib/oe/__init__.py | 2 +
 meta/lib/oe/buildhistory_analysis.py | 456 +++++++
 meta/lib/oe/cachedpath.py | 233 ++++
 meta/lib/oe/classextend.py | 104 ++
 meta/lib/oe/classutils.py | 43 +
 meta/lib/oe/data.py | 17 +
 meta/lib/oe/distro_check.py | 383 ++++++
 meta/lib/oe/image.py | 337 +++++
 meta/lib/oe/license.py | 116 ++
 meta/lib/oe/lsb.py | 81 ++
 meta/lib/oe/maketype.py | 99 ++
 meta/lib/oe/manifest.py | 345 +++++
 meta/lib/oe/package.py | 99 ++
 meta/lib/oe/package_manager.py | 1721 +++++++++++++++++++++++++
 meta/lib/oe/packagedata.py | 94 ++
 meta/lib/oe/packagegroup.py | 36 +
 meta/lib/oe/patch.py | 447 +++++++
 meta/lib/oe/path.py | 243 ++++
 meta/lib/oe/prservice.py | 126 ++
 meta/lib/oe/qa.py | 111 ++
 meta/lib/oe/rootfs.py | 757 +++++++++++
 meta/lib/oe/sdk.py | 325 +++++
 meta/lib/oe/sstatesig.py | 166 +++
 meta/lib/oe/terminal.py | 218 ++++
 meta/lib/oe/tests/__init__.py | 0
 meta/lib/oe/tests/test_license.py | 68 +
 meta/lib/oe/tests/test_path.py | 89 ++
 meta/lib/oe/tests/test_types.py | 62 +
 meta/lib/oe/tests/test_utils.py | 51 +
 meta/lib/oe/types.py | 153 +++
 meta/lib/oe/utils.py | 166 +++
 meta/lib/oeqa/__init__.py | 0
 meta/lib/oeqa/controllers/__init__.py | 3 +
 meta/lib/oeqa/controllers/masterimage.py | 133 ++
 meta/lib/oeqa/controllers/testtargetloader.py | 69 +
 meta/lib/oeqa/oetest.py | 107 ++
 meta/lib/oeqa/runexported.py | 140 ++
 meta/lib/oeqa/runtime/__init__.py | 3 +
 meta/lib/oeqa/runtime/buildcvs.py | 30 +
 meta/lib/oeqa/runtime/buildiptables.py | 30 +
 meta/lib/oeqa/runtime/buildsudoku.py | 27 +
 meta/lib/oeqa/runtime/connman.py | 30 +
 meta/lib/oeqa/runtime/date.py | 22 +
 meta/lib/oeqa/runtime/df.py | 11 +
 meta/lib/oeqa/runtime/dmesg.py | 11 +
 meta/lib/oeqa/runtime/files/hellomod.c | 19 +
 meta/lib/oeqa/runtime/files/hellomod_makefile | 8 +
 meta/lib/oeqa/runtime/files/test.c | 26 +
 meta/lib/oeqa/runtime/files/test.pl | 2 +
 meta/lib/oeqa/runtime/files/test.py | 6 +
 meta/lib/oeqa/runtime/files/testmakefile | 5 +
 meta/lib/oeqa/runtime/gcc.py | 36 +
 meta/lib/oeqa/runtime/kernelmodule.py | 33 +
 meta/lib/oeqa/runtime/ldd.py | 19 +
 meta/lib/oeqa/runtime/logrotate.py | 27 +
 meta/lib/oeqa/runtime/multilib.py | 17 +
 meta/lib/oeqa/runtime/pam.py | 24 +
 meta/lib/oeqa/runtime/perl.py | 28 +
 meta/lib/oeqa/runtime/ping.py | 20 +
 meta/lib/oeqa/runtime/python.py | 33 +
 meta/lib/oeqa/runtime/rpm.py | 50 +
 meta/lib/oeqa/runtime/scanelf.py | 26 +
 meta/lib/oeqa/runtime/scp.py | 21 +
 meta/lib/oeqa/runtime/skeletoninit.py | 28 +
 meta/lib/oeqa/runtime/smart.py | 110 ++
 meta/lib/oeqa/runtime/ssh.py | 18 +
 meta/lib/oeqa/runtime/syslog.py | 46 +
 meta/lib/oeqa/runtime/systemd.py | 84 ++
 meta/lib/oeqa/runtime/vnc.py | 19 +
 meta/lib/oeqa/runtime/x32lib.py | 17 +
 meta/lib/oeqa/runtime/xorg.py | 21 +
 meta/lib/oeqa/selftest/__init__.py | 2 +
 meta/lib/oeqa/selftest/_sstatetests_noauto.py | 95 ++
 meta/lib/oeqa/selftest/base.py | 129 ++
 meta/lib/oeqa/selftest/bblayers.py | 37 +
 meta/lib/oeqa/selftest/bbtests.py | 104 ++
 meta/lib/oeqa/selftest/buildhistory.py | 45 +
 meta/lib/oeqa/selftest/buildoptions.py | 113 ++
 meta/lib/oeqa/selftest/oescripts.py | 60 +
 meta/lib/oeqa/selftest/prservice.py | 113 ++
 meta/lib/oeqa/selftest/sstate.py | 53 +
 meta/lib/oeqa/selftest/sstatetests.py | 193 +++
 meta/lib/oeqa/targetcontrol.py | 175 +++
 meta/lib/oeqa/utils/__init__.py | 3 +
 meta/lib/oeqa/utils/commands.py | 137 ++
 meta/lib/oeqa/utils/decorators.py | 50 +
 meta/lib/oeqa/utils/ftools.py | 27 +
 meta/lib/oeqa/utils/httpserver.py | 33 +
 meta/lib/oeqa/utils/qemurunner.py | 237 ++++
 meta/lib/oeqa/utils/sshcontrol.py | 127 ++
 meta/lib/oeqa/utils/targetbuild.py | 68 +
 91 files changed, 10308 insertions(+)
 create mode 100644 meta/lib/oe/__init__.py
 create mode 100644 meta/lib/oe/buildhistory_analysis.py
 create mode 100644 meta/lib/oe/cachedpath.py
 create mode 100644 meta/lib/oe/classextend.py
 create mode 100644 meta/lib/oe/classutils.py
 create mode 100644 meta/lib/oe/data.py
 create mode 100644 meta/lib/oe/distro_check.py
 create mode 100644 meta/lib/oe/image.py
 create mode 100644 meta/lib/oe/license.py
 create mode 100644 meta/lib/oe/lsb.py
 create mode 100644 meta/lib/oe/maketype.py
 create mode 100644 meta/lib/oe/manifest.py
 create mode 100644 meta/lib/oe/package.py
 create mode 100644 meta/lib/oe/package_manager.py
 create mode 100644 meta/lib/oe/packagedata.py
 create mode 100644 meta/lib/oe/packagegroup.py
 create mode 100644 meta/lib/oe/patch.py
 create mode 100644 meta/lib/oe/path.py
 create mode 100644 meta/lib/oe/prservice.py
 create mode 100644 meta/lib/oe/qa.py
 create mode 100644 meta/lib/oe/rootfs.py
 create mode 100644 meta/lib/oe/sdk.py
 create mode 100644 meta/lib/oe/sstatesig.py
 create mode 100644 meta/lib/oe/terminal.py
 create mode 100644 meta/lib/oe/tests/__init__.py
 create mode 100644 meta/lib/oe/tests/test_license.py
 create mode 100644 meta/lib/oe/tests/test_path.py
 create mode 100644 meta/lib/oe/tests/test_types.py
 create mode 100644 meta/lib/oe/tests/test_utils.py
 create mode 100644 meta/lib/oe/types.py
 create mode 100644 meta/lib/oe/utils.py
 create mode 100644 meta/lib/oeqa/__init__.py
 create mode 100644 meta/lib/oeqa/controllers/__init__.py
 create mode 100644 meta/lib/oeqa/controllers/masterimage.py
 create mode 100644 meta/lib/oeqa/controllers/testtargetloader.py
 create mode 100644 meta/lib/oeqa/oetest.py
 create mode 100755 meta/lib/oeqa/runexported.py
 create mode 100644 meta/lib/oeqa/runtime/__init__.py
 create mode 100644 meta/lib/oeqa/runtime/buildcvs.py
 create mode 100644 meta/lib/oeqa/runtime/buildiptables.py
 create mode 100644 meta/lib/oeqa/runtime/buildsudoku.py
 create mode 100644 meta/lib/oeqa/runtime/connman.py
 create mode 100644 meta/lib/oeqa/runtime/date.py
 create mode 100644 meta/lib/oeqa/runtime/df.py
 create mode 100644 meta/lib/oeqa/runtime/dmesg.py
 create mode 100644 meta/lib/oeqa/runtime/files/hellomod.c
 create mode 100644 meta/lib/oeqa/runtime/files/hellomod_makefile
 create mode 100644 meta/lib/oeqa/runtime/files/test.c
 create mode 100644 meta/lib/oeqa/runtime/files/test.pl
 create mode 100644 meta/lib/oeqa/runtime/files/test.py
 create mode 100644 meta/lib/oeqa/runtime/files/testmakefile
 create mode 100644 meta/lib/oeqa/runtime/gcc.py
 create mode 100644 meta/lib/oeqa/runtime/kernelmodule.py
 create mode 100644 meta/lib/oeqa/runtime/ldd.py
 create mode 100644 meta/lib/oeqa/runtime/logrotate.py
 create mode 100644 meta/lib/oeqa/runtime/multilib.py
 create mode 100644 meta/lib/oeqa/runtime/pam.py
 create mode 100644 meta/lib/oeqa/runtime/perl.py
 create mode 100644 meta/lib/oeqa/runtime/ping.py
 create mode 100644 meta/lib/oeqa/runtime/python.py
 create mode 100644 meta/lib/oeqa/runtime/rpm.py
 create mode 100644 meta/lib/oeqa/runtime/scanelf.py
 create mode 100644 meta/lib/oeqa/runtime/scp.py
 create mode 100644 meta/lib/oeqa/runtime/skeletoninit.py
 create mode 100644 meta/lib/oeqa/runtime/smart.py
 create mode 100644 meta/lib/oeqa/runtime/ssh.py
 create mode 100644 meta/lib/oeqa/runtime/syslog.py
 create mode 100644 meta/lib/oeqa/runtime/systemd.py
 create mode 100644 meta/lib/oeqa/runtime/vnc.py
 create mode 100644 meta/lib/oeqa/runtime/x32lib.py
 create mode 100644 meta/lib/oeqa/runtime/xorg.py
 create mode 100644 meta/lib/oeqa/selftest/__init__.py
 create mode 100644 meta/lib/oeqa/selftest/_sstatetests_noauto.py
 create mode 100644 meta/lib/oeqa/selftest/base.py
 create mode 100644 meta/lib/oeqa/selftest/bblayers.py
 create mode 100644 meta/lib/oeqa/selftest/bbtests.py
 create mode 100644 meta/lib/oeqa/selftest/buildhistory.py
 create mode 100644 meta/lib/oeqa/selftest/buildoptions.py
 create mode 100644 meta/lib/oeqa/selftest/oescripts.py
 create mode 100644 meta/lib/oeqa/selftest/prservice.py
 create mode 100644 meta/lib/oeqa/selftest/sstate.py
 create mode 100644 meta/lib/oeqa/selftest/sstatetests.py
 create mode 100644 meta/lib/oeqa/targetcontrol.py
 create mode 100644 meta/lib/oeqa/utils/__init__.py
 create mode 100644 meta/lib/oeqa/utils/commands.py
 create mode 100644 meta/lib/oeqa/utils/decorators.py
 create mode 100644 meta/lib/oeqa/utils/ftools.py
 create mode 100644 meta/lib/oeqa/utils/httpserver.py
 create mode 100644 meta/lib/oeqa/utils/qemurunner.py
 create mode 100644 meta/lib/oeqa/utils/sshcontrol.py
 create mode 100644 meta/lib/oeqa/utils/targetbuild.py

(limited to 'meta/lib')

diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
new file mode 100644
index 0000000000..3ad9513f40
--- /dev/null
+++ b/meta/lib/oe/__init__.py
@@ -0,0 +1,2 @@
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
new file mode 100644
index 0000000000..5395c768a3
--- /dev/null
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -0,0 +1,456 @@
+# Report significant differences in the buildhistory repository since a specific revision
+#
+# Copyright (C) 2012 Intel Corporation
+# Author: Paul Eggleton
+#
+# Note: requires GitPython 0.3.1+
+#
+# You can use this from the command line by running scripts/buildhistory-diff
+#
+
+import sys
+import os.path
+import difflib
+import git
+import re
+import bb.utils
+
+
+# How to display fields
+list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+list_order_fields = ['PACKAGES']
+defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
+numeric_fields = ['PKGSIZE', 'IMAGESIZE']
+# Fields to monitor
+monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
+ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
+# Percentage change to alert for numeric fields
+monitor_numeric_threshold = 10
+# Image files to monitor (note that image-info.txt is handled separately)
+img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
+# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
+related_fields = {}
+related_fields['RDEPENDS'] = ['DEPENDS']
+related_fields['RRECOMMENDS'] = ['DEPENDS']
+related_fields['FILELIST'] = ['FILES']
+related_fields['PKGSIZE'] = ['FILELIST']
+related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
+related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+
+
+class ChangeRecord:
+    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
+        self.path = path
+        self.fieldname = fieldname
+        self.oldvalue = oldvalue
+        self.newvalue = newvalue
+        self.monitored = monitored
+        self.related = []
+        self.filechanges = None
+
+    def __str__(self):
+        return self._str_internal(True)
+
+    def _str_internal(self, outer):
+        if outer:
+            if '/image-files/' in self.path:
+                prefix = '%s: ' % self.path.split('/image-files/')[0]
+            else:
+                prefix = '%s: ' % self.path
+        else:
+            prefix = ''
+
+        def pkglist_combine(depver):
+            pkglist = []
+            for k,v in depver.iteritems():
+                if v:
+                    pkglist.append("%s (%s)" % (k,v))
+                else:
+                    pkglist.append(k)
+            return pkglist
+
+        if self.fieldname in list_fields or self.fieldname in list_order_fields:
+            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
+                aitems = pkglist_combine(depvera)
+                bitems = pkglist_combine(depverb)
+            else:
+                aitems = self.oldvalue.split()
+                bitems = self.newvalue.split()
+            removed = list(set(aitems) - set(bitems))
+            added = list(set(bitems) - set(aitems))
+
+            if removed or added:
+                if removed and not bitems:
+                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
+                else:
+                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
+            else:
+                out = '%s changed order' % self.fieldname
+        elif self.fieldname in numeric_fields:
+            aval = int(self.oldvalue or 0)
+            bval = int(self.newvalue or 0)
+            if aval != 0:
+                percentchg = ((bval - aval) / float(aval)) * 100
+            else:
+                percentchg = 100
+            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+        elif self.fieldname in defaultval_map:
+            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
+                out += ' - may indicate debian renaming failure'
+        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
+            if self.oldvalue and self.newvalue:
+                out = '%s changed:\n ' % self.fieldname
+            elif self.newvalue:
+                out = '%s added:\n ' % self.fieldname
+            elif self.oldvalue:
+                out = '%s cleared:\n ' % self.fieldname
+            alines = self.oldvalue.splitlines()
+            blines = self.newvalue.splitlines()
+            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
+            out += '\n '.join(list(diff)[2:])
+            out += '\n --'
+        elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
+            fieldname = self.fieldname
+            if '/image-files/' in self.path:
+                fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
+                out = 'Changes to %s:\n ' % fieldname
+            else:
+                if outer:
+                    prefix = 'Changes to %s ' % self.path
+                out = '(%s):\n ' % self.fieldname
+            if self.filechanges:
+                out += '\n '.join(['%s' % i for i in self.filechanges])
+            else:
+                alines = self.oldvalue.splitlines()
+                blines = self.newvalue.splitlines()
+                diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
+                out += '\n '.join(list(diff))
+                out += '\n --'
+        else:
+            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
+
+        if self.related:
+            for chg in self.related:
+                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
+                    continue
+                for line in chg._str_internal(False).splitlines():
+                    out += '\n * %s' % line
+
+        return '%s%s' % (prefix, out)
+
+class FileChange:
+    changetype_add = 'A'
+    changetype_remove = 'R'
+    changetype_type = 'T'
+    changetype_perms = 'P'
+    changetype_ownergroup = 'O'
+    changetype_link = 'L'
+
+    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
+        self.path = path
+        self.changetype = changetype
+        self.oldvalue = oldvalue
+        self.newvalue = newvalue
+
+    def _ftype_str(self, ftype):
+        if ftype == '-':
+            return 'file'
+        elif ftype == 'd':
+            return 'directory'
+        elif ftype == 'l':
+            return 'symlink'
+        elif ftype == 'c':
+            return 'char device'
+        elif ftype == 'b':
+            return 'block device'
+        elif ftype == 'p':
+            return 'fifo'
+        elif ftype == 's':
+            return 'socket'
+        else:
+            return 'unknown (%s)' % ftype
+
+    def __str__(self):
+        if self.changetype == self.changetype_add:
+            return '%s was added' % self.path
+        elif self.changetype == self.changetype_remove:
+            return '%s was removed' % self.path
+        elif self.changetype == self.changetype_type:
+            return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
+        elif self.changetype == self.changetype_perms:
+            return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+        elif self.changetype == self.changetype_ownergroup:
+            return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+        elif self.changetype == self.changetype_link:
+            return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+        else:
+            return '%s changed (unknown)' % self.path
+
+
+def blob_to_dict(blob):
+    alines = blob.data_stream.read().splitlines()
+    adict = {}
+    for line in alines:
+        splitv = [i.strip() for i in line.split('=',1)]
+        if len(splitv) > 1:
+            adict[splitv[0]] = splitv[1]
+    return adict
+
+
+def file_list_to_dict(lines):
+    adict = {}
+    for line in lines:
+        # Leave the last few fields intact so we handle file names containing spaces
+        splitv = line.split(None,4)
+        # Grab the path and remove the leading .
+        path = splitv[4][1:].strip()
+        # Handle symlinks
+        if(' -> ' in path):
+            target = path.split(' -> ')[1]
+            path = path.split(' -> ')[0]
+            adict[path] = splitv[0:3] + [target]
+        else:
+            adict[path] = splitv[0:3]
+    return adict
+
+
+def compare_file_lists(alines, blines):
+    adict = file_list_to_dict(alines)
+    bdict = file_list_to_dict(blines)
+    filechanges = []
+    for path, splitv in adict.iteritems():
+        newsplitv = bdict.pop(path, None)
+        if newsplitv:
+            # Check type
+            oldvalue = splitv[0][0]
+            newvalue = newsplitv[0][0]
+            if oldvalue != newvalue:
+                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+            # Check permissions
+            oldvalue = splitv[0][1:]
+            newvalue = newsplitv[0][1:]
+            if oldvalue != newvalue:
+                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
+            # Check owner/group
+            oldvalue = '%s/%s' % (splitv[1], splitv[2])
+            newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+            if oldvalue != newvalue:
+                filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+            # Check symlink target
+            if newsplitv[0][0] == 'l':
+                if len(splitv) > 3:
+                    oldvalue = splitv[3]
+                else:
+                    oldvalue = None
+                newvalue = newsplitv[3]
+                if oldvalue != newvalue:
+                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
+        else:
+            filechanges.append(FileChange(path, FileChange.changetype_remove))
+
+    # Whatever is left over has been added
+    for path in bdict:
+        filechanges.append(FileChange(path, FileChange.changetype_add))
+
+    return filechanges
+
+
+def compare_lists(alines, blines):
+    removed = list(set(alines) - set(blines))
+    added = list(set(blines) - set(alines))
+
+    filechanges = []
+    for pkg in removed:
+        filechanges.append(FileChange(pkg, FileChange.changetype_remove))
+    for pkg in added:
+        filechanges.append(FileChange(pkg, FileChange.changetype_add))
+
+    return filechanges
+
+
+def compare_pkg_lists(astr, bstr):
+    depvera = bb.utils.explode_dep_versions2(astr)
+    depverb = bb.utils.explode_dep_versions2(bstr)
+
+    # Strip out changes where the version has increased
+    remove = []
+    for k in depvera:
+        if k in depverb:
+            dva = depvera[k]
+            dvb = depverb[k]
+            if dva and dvb and len(dva) == len(dvb):
+                # Since length is the same, sort so that prefixes (e.g. >=) will line up
+                dva.sort()
+                dvb.sort()
+                removeit = True
+                for dvai, dvbi in zip(dva, dvb):
+                    if dvai != dvbi:
+                        aiprefix = dvai.split(' ')[0]
+                        biprefix = dvbi.split(' ')[0]
+                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
+                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
+                                removeit = False
+                                break
+                        else:
+                            removeit = False
+                            break
+                if removeit:
+                    remove.append(k)
+
+    for k in remove:
+        depvera.pop(k)
+        depverb.pop(k)
+
+    return (depvera, depverb)
+
+
+def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
+    adict = blob_to_dict(ablob)
+    bdict = blob_to_dict(bblob)
+
+    pkgname = os.path.basename(path)
+
+    defaultvals = {}
+    defaultvals['PKG'] = pkgname
+    defaultvals['PKGE'] = '0'
+
+    changes = []
+    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
+    for key in keys:
+        astr = adict.get(key, '')
+        bstr = bdict.get(key, '')
+        if key in ver_monitor_fields:
+            monitored = report_ver or astr or bstr
+        else:
+            monitored = key in monitor_fields
+        mapped_key = defaultval_map.get(key, '')
+        if mapped_key:
+            if not astr:
+                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
+            if not bstr:
+                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
+
+        if astr != bstr:
+            if (not report_all) and key in numeric_fields:
+                aval = int(astr or 0)
+                bval = int(bstr or 0)
+                if aval != 0:
+                    percentchg = ((bval - aval) / float(aval)) * 100
+                else:
+                    percentchg = 100
+                if abs(percentchg) < monitor_numeric_threshold:
+                    continue
+            elif (not report_all) and key in list_fields:
+                if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+                    continue
+                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
+                    if depvera == depverb:
+                        continue
+                alist = astr.split()
+                alist.sort()
+                blist = bstr.split()
+                blist.sort()
+                # We don't care about the removal of self-dependencies
+                if pkgname in alist and not pkgname in blist:
+                    alist.remove(pkgname)
+                if ' '.join(alist) == ' '.join(blist):
+                    continue
+
+            chg = ChangeRecord(path, key, astr, bstr, monitored)
+            changes.append(chg)
+    return changes
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
+    repo = git.Repo(repopath)
+    assert repo.bare == False
+    commit = repo.commit(revision1)
+    diff = commit.diff(revision2)
+
+    changes = []
+    for d in diff.iter_change_type('M'):
+        path = os.path.dirname(d.a_blob.path)
+        if path.startswith('packages/'):
+            filename = os.path.basename(d.a_blob.path)
+            if filename == 'latest':
+                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+            elif filename.startswith('latest.'):
+                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+                changes.append(chg)
+        elif path.startswith('images/'):
+            filename = os.path.basename(d.a_blob.path)
+            if filename in img_monitor_files:
+                if filename == 'files-in-image.txt':
+                    alines = d.a_blob.data_stream.read().splitlines()
+                    blines = d.b_blob.data_stream.read().splitlines()
+                    filechanges = compare_file_lists(alines,blines)
+                    if filechanges:
+                        chg = ChangeRecord(path, filename, None, None, True)
+                        chg.filechanges = filechanges
+                        changes.append(chg)
+                elif filename == 'installed-package-names.txt':
+                    alines = d.a_blob.data_stream.read().splitlines()
+                    blines = d.b_blob.data_stream.read().splitlines()
+                    filechanges = compare_lists(alines,blines)
+                    if filechanges:
+                        chg = ChangeRecord(path, filename, None, None, True)
+                        chg.filechanges = filechanges
+                        changes.append(chg)
+                else:
+                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+                    changes.append(chg)
+            elif filename == 'image-info.txt':
+                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+            elif '/image-files/' in path:
+                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+                changes.append(chg)
+
+    # Look for added preinst/postinst/prerm/postrm
+    # (without reporting newly added recipes)
+    addedpkgs = []
+    addedchanges = []
+    for d in diff.iter_change_type('A'):
+        path = os.path.dirname(d.b_blob.path)
+        if path.startswith('packages/'):
+            filename = os.path.basename(d.b_blob.path)
+            if filename == 'latest':
+                addedpkgs.append(path)
+            elif filename.startswith('latest.'):
+                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
+                addedchanges.append(chg)
+    for chg in addedchanges:
+        found = False
+        for pkg in addedpkgs:
+            if chg.path.startswith(pkg):
+                found = True
+                break
+        if not found:
+            changes.append(chg)
+
+    # Look for cleared preinst/postinst/prerm/postrm
+    for d in diff.iter_change_type('D'):
+        path = os.path.dirname(d.a_blob.path)
+        if path.startswith('packages/'):
+            filename = os.path.basename(d.a_blob.path)
+            if filename != 'latest' and filename.startswith('latest.'):
+                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
+                changes.append(chg)
+
+    # Link related changes
+    for chg in changes:
+        if chg.monitored:
+            for chg2 in changes:
+                # (Check dirname in the case of fields from recipe info files)
+                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
+                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
+                        chg.related.append(chg2)
+                elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
+                    chg.related.append(chg2)
+
+    if report_all:
+        return changes
+    else:
+        return [chg for chg in changes if chg.monitored]
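
For orientation, a minimal driver sketch (not part of the patch) for the module above. It assumes a buildhistory checkout at ./buildhistory and GitPython installed; the path and revisions are placeholders. The supported entry point is scripts/buildhistory-diff, as the header comment notes.

    # Sketch, not part of the commit: report monitored differences between
    # two buildhistory revisions. Path and revisions are placeholders.
    import oe.buildhistory_analysis

    changes = oe.buildhistory_analysis.process_changes('./buildhistory',
                                                       'HEAD~1', 'HEAD')
    for chg in changes:
        print(str(chg))  # e.g. 'packages/.../foo: PKGSIZE changed from ...'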
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
new file mode 100644
index 0000000000..0840cc4c3f
--- /dev/null
+++ b/meta/lib/oe/cachedpath.py
@@ -0,0 +1,233 @@
+#
+# Based on standard python library functions but avoid
+# repeated stat calls. It's assumed the files will not change from under us,
+# so we can cache stat calls.
+#
+
+import os
+import errno
+import stat as statmod
+
+class CachedPath(object):
+    def __init__(self):
+        self.statcache = {}
+        self.lstatcache = {}
+        self.normpathcache = {}
+        return
+
+    def updatecache(self, x):
+        x = self.normpath(x)
+        if x in self.statcache:
+            del self.statcache[x]
+        if x in self.lstatcache:
+            del self.lstatcache[x]
+
+    def normpath(self, path):
+        if path in self.normpathcache:
+            return self.normpathcache[path]
+        newpath = os.path.normpath(path)
+        self.normpathcache[path] = newpath
+        return newpath
+
+    def _callstat(self, path):
+        if path in self.statcache:
+            return self.statcache[path]
+        try:
+            st = os.stat(path)
+            self.statcache[path] = st
+            return st
+        except os.error:
+            self.statcache[path] = False
+            return False
+
+    # We might as well call lstat and then only
+    # call stat as well in the symbolic link case
+    # since this turns out to be much more optimal
+    # in real world usage of this cache
+    def callstat(self, path):
+        path = self.normpath(path)
+        self.calllstat(path)
+        return self.statcache[path]
+
+    def calllstat(self, path):
+        path = self.normpath(path)
+        if path in self.lstatcache:
+            return self.lstatcache[path]
+        #bb.error("LStatpath:" + path)
+        try:
+            lst = os.lstat(path)
+            self.lstatcache[path] = lst
+            if not statmod.S_ISLNK(lst.st_mode):
+                self.statcache[path] = lst
+            else:
+                self._callstat(path)
+            return lst
+        except (os.error, AttributeError):
+            self.lstatcache[path] = False
+            self.statcache[path] = False
+            return False
+
+    # This follows symbolic links, so both islink() and isdir() can be true
+    # for the same path on systems that support symlinks
+    def isfile(self, path):
+        """Test whether a path is a regular file"""
+        st = self.callstat(path)
+        if not st:
+            return False
+        return statmod.S_ISREG(st.st_mode)
+
+    # Is a path a directory?
+    # This follows symbolic links, so both islink() and isdir()
+    # can be true for the same path on systems that support symlinks
+    def isdir(self, s):
+        """Return true if the pathname refers to an existing directory."""
+        st = self.callstat(s)
+        if not st:
+            return False
+        return statmod.S_ISDIR(st.st_mode)
+
+    def islink(self, path):
+        """Test whether a path is a symbolic link"""
+        st = self.calllstat(path)
+        if not st:
+            return False
+        return statmod.S_ISLNK(st.st_mode)
+
+    # Does a path exist?
+    # This is false for dangling symbolic links on systems that support them.
+    def exists(self, path):
+        """Test whether a path exists. Returns False for broken symbolic links"""
+        if self.callstat(path):
+            return True
+        return False
+
+    def lexists(self, path):
+        """Test whether a path exists. Returns True for broken symbolic links"""
+        if self.calllstat(path):
+            return True
+        return False
+
+    def stat(self, path):
+        return self.callstat(path)
+
+    def lstat(self, path):
+        return self.calllstat(path)
+
+    def walk(self, top, topdown=True, onerror=None, followlinks=False):
+        # Matches os.walk, not os.path.walk()
+
+        # We may not have read permission for top, in which case we can't
+        # get a list of the files the directory contains. os.path.walk
+        # always suppressed the exception then, rather than blow up for a
+        # minor reason when (say) a thousand readable directories are still
+        # left to visit. That logic is copied here.
+        try:
+            names = os.listdir(top)
+        except os.error as err:
+            if onerror is not None:
+                onerror(err)
+            return
+
+        dirs, nondirs = [], []
+        for name in names:
+            if self.isdir(os.path.join(top, name)):
+                dirs.append(name)
+            else:
+                nondirs.append(name)
+
+        if topdown:
+            yield top, dirs, nondirs
+        for name in dirs:
+            new_path = os.path.join(top, name)
+            if followlinks or not self.islink(new_path):
+                for x in self.walk(new_path, topdown, onerror, followlinks):
+                    yield x
+        if not topdown:
+            yield top, dirs, nondirs
+
+    ## realpath() related functions
+    def __is_path_below(self, file, root):
+        return (file + os.path.sep).startswith(root)
+
+    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
+        """Calculates real path of symlink 'start' + 'rel_path' below
+        'root'; no part of 'start' below 'root' must contain symlinks. """
+        have_dir = True
+
+        for d in rel_path.split(os.path.sep):
+            if not have_dir and not assume_dir:
+                raise OSError(errno.ENOENT, "no such directory %s" % start)
+
+            if d == os.path.pardir: # '..'
+                if len(start) >= len(root):
+                    # do not follow '..' before root
+                    start = os.path.dirname(start)
+                else:
+                    # emit warning?
+                    pass
+            else:
+                (start, have_dir) = self.__realpath(os.path.join(start, d),
+                                                    root, loop_cnt, assume_dir)
+
+            assert(self.__is_path_below(start, root))
+
+        return start
+
+    def __realpath(self, file, root, loop_cnt, assume_dir):
+        while self.islink(file) and len(file) >= len(root):
+            if loop_cnt == 0:
+                raise OSError(errno.ELOOP, file)
+
+            loop_cnt -= 1
+            target = os.path.normpath(os.readlink(file))
+
+            if not os.path.isabs(target):
+                tdir = os.path.dirname(file)
+                assert(self.__is_path_below(tdir, root))
+            else:
+                tdir = root
+
+            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+        try:
+            is_dir = self.isdir(file)
+        except:
+            is_dir = False
+
+        return (file, is_dir)
+
+    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+        """ Returns the canonical path of 'file' with assuming a
+        toplevel 'root' directory. When 'use_physdir' is set, all
+        preceding path components of 'file' will be resolved first;
+        this flag should be set unless it is guaranteed that there is
+        no symlink in the path.
+        When 'assume_dir' is not set, missing
+        path components will raise an ENOENT error"""
+
+        root = os.path.normpath(root)
+        file = os.path.normpath(file)
+
+        if not root.endswith(os.path.sep):
+            # letting root end with '/' makes some things easier
+            root = root + os.path.sep
+
+        if not self.__is_path_below(file, root):
+            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+        try:
+            if use_physdir:
+                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+            else:
+                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
+        except OSError as e:
+            if e.errno == errno.ELOOP:
+                # make ELOOP more readable; without catching it, there will
+                # be printed a backtrace with 100s of OSError exceptions
+                # else
+                raise OSError(errno.ELOOP,
+                              "too many recursions while resolving '%s'; loop in '%s'" %
+                              (file, e.strerror))
+
+            raise
+
+        return file
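
A short usage sketch for CachedPath (not part of the patch; the path is a placeholder). The point of the class is that repeated queries against the same tree reuse cached stat()/lstat() results instead of hitting the filesystem again.

    # Sketch, not part of the commit.
    from oe.cachedpath import CachedPath

    cpath = CachedPath()
    for root, dirs, files in cpath.walk('/tmp'):
        pass                      # populates the stat caches as it goes
    cpath.isdir('/tmp')           # answered from the cache
    cpath.updatecache('/tmp')     # drop cached entries if the path changed on disk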
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
new file mode 100644
index 0000000000..e2ae7e9f94
--- /dev/null
+++ b/meta/lib/oe/classextend.py
@@ -0,0 +1,104 @@
+class ClassExtender(object):
+    def __init__(self, extname, d):
+        self.extname = extname
+        self.d = d
+        self.pkgs_mapping = []
+
+    def extend_name(self, name):
+        if name.startswith("kernel-") or name == "virtual/kernel":
+            return name
+        if name.startswith("rtld"):
+            return name
+        if name.endswith("-" + self.extname):
+            name = name.replace("-" + self.extname, "")
+        if name.startswith("virtual/"):
+            subs = name.split("/", 1)[1]
+            if not subs.startswith(self.extname):
+                return "virtual/" + self.extname + "-" + subs
+            return name
+        if not name.startswith(self.extname):
+            return self.extname + "-" + name
+        return name
+
+    def map_variable(self, varname, setvar = True):
+        var = self.d.getVar(varname, True)
+        if not var:
+            return ""
+        var = var.split()
+        newvar = []
+        for v in var:
+            newvar.append(self.extend_name(v))
+        newdata = " ".join(newvar)
+        if setvar:
+            self.d.setVar(varname, newdata)
+        return newdata
+
+    def map_regexp_variable(self, varname, setvar = True):
+        var = self.d.getVar(varname, True)
+        if not var:
+            return ""
+        var = var.split()
+        newvar = []
+        for v in var:
+            if v.startswith("^" + self.extname):
+                newvar.append(v)
+            elif v.startswith("^"):
+                newvar.append("^" + self.extname + "-" + v[1:])
+            else:
+                newvar.append(self.extend_name(v))
+        newdata = " ".join(newvar)
+        if setvar:
+            self.d.setVar(varname, newdata)
+        return newdata
+
+    def map_depends(self, dep):
+        if dep.endswith(("-native", "-native-runtime", "-crosssdk")) or ('nativesdk-' in dep) or ('cross-canadian' in dep):
+            return dep
+        else:
+            return self.extend_name(dep)
+
+    def map_depends_variable(self, varname, suffix = ""):
+        if suffix:
+            varname = varname + "_" + suffix
+        deps = self.d.getVar(varname, True)
+        if not deps:
+            return
+        deps = bb.utils.explode_dep_versions2(deps)
+        newdeps = {}
+        for dep in deps:
+            newdeps[self.map_depends(dep)] = deps[dep]
+
+        self.d.setVar(varname, bb.utils.join_deps(newdeps, False))
+
+    def map_packagevars(self):
+        for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
+            self.map_depends_variable("RDEPENDS", pkg)
+            self.map_depends_variable("RRECOMMENDS", pkg)
+            self.map_depends_variable("RSUGGESTS", pkg)
+            self.map_depends_variable("RPROVIDES", pkg)
+            self.map_depends_variable("RREPLACES", pkg)
+            self.map_depends_variable("RCONFLICTS", pkg)
+            self.map_depends_variable("PKG", pkg)
+
+    def rename_packages(self):
+        for pkg in (self.d.getVar("PACKAGES", True) or "").split():
+            if pkg.startswith(self.extname):
+                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
+                continue
+            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
+
+        self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
+
+    def rename_package_variables(self, variables):
+        for pkg_mapping in self.pkgs_mapping:
+            for subs in variables:
+                self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+
+class NativesdkClassExtender(ClassExtender):
+    def map_depends(self, dep):
+        if dep.endswith(("-native", "-native-runtime", "-cross", "-crosssdk")) or ('nativesdk-' in dep):
+            return dep
+        elif dep.endswith(("-gcc-intermediate", "-gcc-initial", "-gcc", "-g++")):
+            return dep + "-crosssdk"
+        else:
+            return self.extend_name(dep)
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
new file mode 100644
index 0000000000..58188fdd6e
--- /dev/null
+++ b/meta/lib/oe/classutils.py
@@ -0,0 +1,43 @@
+class ClassRegistry(type):
+    """Maintain a registry of classes, indexed by name.
+
+Note that this implementation requires that the names be unique, as it uses
+a dictionary to hold the classes by name.
+
+The name in the registry can be overridden via the 'name' attribute of the
+class, and the 'priority' attribute controls priority. The prioritized()
+method returns the registered classes in priority order.
+
+Subclasses of ClassRegistry may define an 'implemented' property to exert
+control over whether the class will be added to the registry (e.g. to keep
+abstract base classes out of the registry)."""
+    priority = 0
+    class __metaclass__(type):
+        """Give each ClassRegistry their own registry"""
+        def __init__(cls, name, bases, attrs):
+            cls.registry = {}
+            type.__init__(cls, name, bases, attrs)
+
+    def __init__(cls, name, bases, attrs):
+        super(ClassRegistry, cls).__init__(name, bases, attrs)
+        try:
+            if not cls.implemented:
+                return
+        except AttributeError:
+            pass
+
+        try:
+            cls.name
+        except AttributeError:
+            cls.name = name
+        cls.registry[cls.name] = cls
+
+    @classmethod
+    def prioritized(tcls):
+        return sorted(tcls.registry.values(),
+                      key=lambda v: v.priority, reverse=True)
+
+    def unregister(cls):
+        for key in cls.registry.keys():
+            if cls.registry[key] is cls:
+                del cls.registry[key]
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
new file mode 100644
index 0000000000..4cc0e02968
--- /dev/null
+++ b/meta/lib/oe/data.py
@@ -0,0 +1,17 @@
+import oe.maketype
+
+def typed_value(key, d):
+    """Construct a value for the specified metadata variable, using its flags
+    to determine the type and parameters for construction."""
+    var_type = d.getVarFlag(key, 'type')
+    flags = d.getVarFlags(key)
+    if flags is not None:
+        flags = dict((flag, d.expand(value))
+                     for flag, value in flags.iteritems())
+    else:
+        flags = {}
+
+    try:
+        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
+    except (TypeError, ValueError), exc:
+        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
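
A quick illustration of typed_value() (not part of the patch). It assumes a variable carrying a 'type' flag, with `d` being the datastore object available in recipe Python context; FOO is a made-up variable name.

    # Sketch, not part of the commit: with metadata such as
    #   FOO = "1"
    #   FOO[type] = "boolean"
    # typed_value() reads the 'type' flag and constructs a typed value via
    # oe.maketype (types such as 'boolean' live in meta/lib/oe/types.py).
    import oe.data
    value = oe.data.typed_value('FOO', d)  # -> True rather than the string "1"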
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
new file mode 100644
index 0000000000..8ed5b0ec80
--- /dev/null
+++ b/meta/lib/oe/distro_check.py
@@ -0,0 +1,383 @@
+def get_links_from_url(url):
+    "Return all the href links found on the web location"
+
+    import urllib, sgmllib
+
+    class LinksParser(sgmllib.SGMLParser):
+        def parse(self, s):
+            "Parse the given string 's'."
+            self.feed(s)
+            self.close()
+
+        def __init__(self, verbose=0):
+            "Initialise an object passing 'verbose' to the superclass."
+            sgmllib.SGMLParser.__init__(self, verbose)
+            self.hyperlinks = []
+
+        def start_a(self, attributes):
+            "Process a hyperlink and its 'attributes'."
+            for name, value in attributes:
+                if name == "href":
+                    self.hyperlinks.append(value.strip('/'))
+
+        def get_hyperlinks(self):
+            "Return the list of hyperlinks."
+            return self.hyperlinks
+
+    sock = urllib.urlopen(url)
+    webpage = sock.read()
+    sock.close()
+
+    linksparser = LinksParser()
+    linksparser.parse(webpage)
+    return linksparser.get_hyperlinks()
+
+def find_latest_numeric_release(url):
+    "Find the latest listed numeric release on the given url"
+    max=0
+    maxstr=""
+    for link in get_links_from_url(url):
+        try:
+            release = float(link)
+        except:
+            release = 0
+        if release > max:
+            max = release
+            maxstr = link
+    return maxstr
+
+def is_src_rpm(name):
+    "Check if the link is pointing to a src.rpm file"
+    if name[-8:] == ".src.rpm":
+        return True
+    else:
+        return False
+
+def package_name_from_srpm(srpm):
+    "Strip out the package name from the src.rpm filename"
+    strings = srpm.split('-')
+    package_name = strings[0]
+    for i in range(1, len (strings) - 1):
+        str = strings[i]
+        if not str[0].isdigit():
+            package_name += '-' + str
+    return package_name
+
+def clean_package_list(package_list):
+    "Removes multiple entries of packages and sorts the list"
+    set = {}
+    map(set.__setitem__, package_list, [])
+    return set.keys()
+
+
+def get_latest_released_meego_source_package_list():
+    "Returns list of all the names of packages in the latest meego distro"
+
+    package_names = []
+    try:
+        f = open("/tmp/Meego-1.1", "r")
+        for line in f:
+            package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
+    except IOError: pass
+    package_list=clean_package_list(package_names)
+    return "1.0", package_list
+
+def get_source_package_list_from_url(url, section):
+    "Return a sectioned list of package names from a URL list"
+
+    bb.note("Reading %s: %s" % (url, section))
+    links = get_links_from_url(url)
+    srpms = filter(is_src_rpm, links)
+    names_list = map(package_name_from_srpm, srpms)
+
+    new_pkgs = []
+    for pkgs in names_list:
+        new_pkgs.append(pkgs + ":" + section)
+
+    return new_pkgs
+
+def get_latest_released_fedora_source_package_list():
+    "Returns list of all the names of packages in the latest fedora distro"
+    latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")
+
+    package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")
+
+#    package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
+    package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")
+
+    package_list=clean_package_list(package_names)
+
+    return latest, package_list
+
+def get_latest_released_opensuse_source_package_list():
+    "Returns list of all the names of packages in the latest opensuse distro"
+    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")
+
+    package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
+    package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")
+
+    package_list=clean_package_list(package_names)
+    return latest, package_list
+
+def get_latest_released_mandriva_source_package_list():
+    "Returns list of all the names of packages in the latest mandriva distro"
+    latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")
+    package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
+#    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
+    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")
+
+    package_list=clean_package_list(package_names)
+    return latest, package_list
+
+def find_latest_debian_release(url):
+    "Find the latest listed debian release on the given url"
+
+    releases = []
+    for link in get_links_from_url(url):
+        if link[:6] == "Debian":
+            if ';' not in link:
+                releases.append(link)
+    releases.sort()
+    try:
+        return releases.pop()[6:]
+    except:
+        return "_NotFound_"
+
+def get_debian_style_source_package_list(url, section):
+    "Return the list of package-names stored in the debian style Sources.gz file"
+    import urllib
+    sock = urllib.urlopen(url)
+    import tempfile
+    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
+    tmpfilename=tmpfile.name
+    tmpfile.write(sock.read())
+    sock.close()
+    tmpfile.close()
+    import gzip
+    bb.note("Reading %s: %s" % (url, section))
+
+    f = gzip.open(tmpfilename)
+    package_names = []
+    for line in f:
+        if line[:9] == "Package: ":
+            package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
+    os.unlink(tmpfilename)
+
+    return package_names
+
+def get_latest_released_debian_source_package_list():
+    "Returns list of all the names of packages in the latest debian distro"
+    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
+    url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
+    package_names = get_debian_style_source_package_list(url, "main")
+#    url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
+#    package_names += get_debian_style_source_package_list(url, "contrib")
+    url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
+    package_names += get_debian_style_source_package_list(url, "updates")
+    package_list=clean_package_list(package_names)
+    return latest, package_list
+
+def find_latest_ubuntu_release(url):
+    "Find the latest listed ubuntu release on the given url"
+    url += "?C=M;O=D" # Descending Sort by Last Modified
+    for link in get_links_from_url(url):
+        if link[-8:] == "-updates":
+            return link[:-8]
+    return "_NotFound_"
+
+def get_latest_released_ubuntu_source_package_list():
+    "Returns list of all the names of packages in the latest ubuntu distro"
+    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
+    url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
+    package_names = get_debian_style_source_package_list(url, "main")
+#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
+#    package_names += get_debian_style_source_package_list(url, "multiverse")
+#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
+#    package_names += get_debian_style_source_package_list(url, "universe")
"http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest + package_names += get_debian_style_source_package_list(url, "updates") + package_list=clean_package_list(package_names) + return latest, package_list + +def create_distro_packages_list(distro_check_dir): + pkglst_dir = os.path.join(distro_check_dir, "package_lists") + if not os.path.isdir (pkglst_dir): + os.makedirs(pkglst_dir) + # first clear old stuff + for file in os.listdir(pkglst_dir): + os.unlink(os.path.join(pkglst_dir, file)) + + per_distro_functions = [ + ["Debian", get_latest_released_debian_source_package_list], + ["Ubuntu", get_latest_released_ubuntu_source_package_list], + ["Fedora", get_latest_released_fedora_source_package_list], + ["OpenSuSE", get_latest_released_opensuse_source_package_list], + ["Mandriva", get_latest_released_mandriva_source_package_list], + ["Meego", get_latest_released_meego_source_package_list] + ] + + from datetime import datetime + begin = datetime.now() + for distro in per_distro_functions: + name = distro[0] + release, package_list = distro[1]() + bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list))) + package_list_file = os.path.join(pkglst_dir, name + "-" + release) + f = open(package_list_file, "w+b") + for pkg in package_list: + f.write(pkg + "\n") + f.close() + end = datetime.now() + delta = end - begin + bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds) + +def update_distro_data(distro_check_dir, datetime): + """ + If distro packages list data is old then rebuild it. + The operations has to be protected by a lock so that + only one thread performes it at a time. + """ + if not os.path.isdir (distro_check_dir): + try: + bb.note ("Making new directory: %s" % distro_check_dir) + os.makedirs (distro_check_dir) + except OSError: + raise Exception('Unable to create directory %s' % (distro_check_dir)) + + + datetime_file = os.path.join(distro_check_dir, "build_datetime") + saved_datetime = "_invalid_" + import fcntl + try: + if not os.path.exists(datetime_file): + open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail + + f = open(datetime_file, "r+b") + fcntl.lockf(f, fcntl.LOCK_EX) + saved_datetime = f.read() + if saved_datetime[0:8] != datetime[0:8]: + bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime)) + bb.note("Regenerating distro package lists") + create_distro_packages_list(distro_check_dir) + f.seek(0) + f.write(datetime) + + except OSError: + raise Exception('Unable to read/write this file: %s' % (datetime_file)) + finally: + fcntl.lockf(f, fcntl.LOCK_UN) + f.close() + +def compare_in_distro_packages_list(distro_check_dir, d): + if not os.path.isdir(distro_check_dir): + raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed") + + localdata = bb.data.createCopy(d) + pkglst_dir = os.path.join(distro_check_dir, "package_lists") + matching_distros = [] + pn = d.getVar('PN', True) + recipe_name = d.getVar('PN', True) + bb.note("Checking: %s" % pn) + + trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"}) + + if pn.find("-native") != -1: + pnstripped = pn.split("-native") + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + recipe_name = pnstripped[0] + + if pn.startswith("nativesdk-"): + pnstripped = pn.split("nativesdk-") + localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + 
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
+        bb.data.update_data(localdata)
+        recipe_name = pnstripped[1]
+
+    if pn.find("-cross") != -1:
+        pnstripped = pn.split("-cross")
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+        bb.data.update_data(localdata)
+        recipe_name = pnstripped[0]
+
+    if pn.find("-initial") != -1:
+        pnstripped = pn.split("-initial")
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+        bb.data.update_data(localdata)
+        recipe_name = pnstripped[0]
+
+    bb.note("Recipe: %s" % recipe_name)
+    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
+
+    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
+
+    if tmp:
+        list = tmp.split(' ')
+        for str in list:
+            if str and str.find("=") == -1 and distro_exceptions[str]:
+                matching_distros.append(str)
+
+    distro_pn_aliases = {}
+    if tmp:
+        list = tmp.split(' ')
+        for str in list:
+            if str.find("=") != -1:
+                (dist, pn_alias) = str.split('=')
+                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
+
+    for file in os.listdir(pkglst_dir):
+        (distro, distro_release) = file.split("-")
+        f = open(os.path.join(pkglst_dir, file), "rb")
+        for line in f:
+            (pkg, section) = line.split(":")
+            if distro.lower() in distro_pn_aliases:
+                pn = distro_pn_aliases[distro.lower()]
+            else:
+                pn = recipe_name
+            if pn == pkg:
+                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
+                f.close()
+                break
+        f.close()
+
+
+    if tmp != None:
+        list = tmp.split(' ')
+        for item in list:
+            matching_distros.append(item)
+    bb.note("Matching: %s" % matching_distros)
+    return matching_distros
+
+def create_log_file(d, logname):
+    import subprocess
+    logpath = d.getVar('LOG_DIR', True)
+    bb.utils.mkdirhier(logpath)
+    logfn, logsuffix = os.path.splitext(logname)
+    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
+    if not os.path.exists(logfile):
+        slogfile = os.path.join(logpath, logname)
+        if os.path.exists(slogfile):
+            os.remove(slogfile)
+        subprocess.call("touch %s" % logfile, shell=True)
+        os.symlink(logfile, slogfile)
+        d.setVar('LOG_FILE', logfile)
+    return logfile
+
+
+def save_distro_check_result(result, datetime, result_file, d):
+    pn = d.getVar('PN', True)
+    logdir = d.getVar('LOG_DIR', True)
+    if not logdir:
+        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
+        return
+    if not os.path.isdir(logdir):
+        os.makedirs(logdir)
+    line = pn
+    for i in result:
+        line = line + "," + i
+    f = open(result_file, "a")
+    import fcntl
+    fcntl.lockf(f, fcntl.LOCK_EX)
+    f.seek(0, os.SEEK_END) # seek to the end of file
+    f.write(line + "\n")
+    fcntl.lockf(f, fcntl.LOCK_UN)
+    f.close()
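
As a small worked example of the srpm helpers above (not part of the patch; the filename is invented):

    # Sketch, not part of the commit.
    srpm = "libfoo-devel-1.2.3-4.src.rpm"
    if is_src_rpm(srpm):
        name = package_name_from_srpm(srpm)
        # name == "libfoo-devel": components starting with a digit and the
        # trailing release/.src.rpm part are dropped, the non-numeric name
        # parts are kept.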
diff --git a/meta/lib/oe/image.py b/meta/lib/oe/image.py
new file mode 100644
index 0000000000..c9b9033132
--- /dev/null
+++ b/meta/lib/oe/image.py
@@ -0,0 +1,337 @@
+from oe.utils import execute_pre_post_process
+import os
+import subprocess
+import multiprocessing
+
+
+def generate_image(arg):
+    (type, subimages, create_img_cmd) = arg
+
+    bb.note("Running image creation script for %s: %s ..." %
+            (type, create_img_cmd))
+
+    try:
+        subprocess.check_output(create_img_cmd, stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as e:
+        return("Error: The image creation script '%s' returned %d:\n%s" %
+               (e.cmd, e.returncode, e.output))
+
+    return None
+
+
+"""
+This class will help compute IMAGE_FSTYPE dependencies and group them in batches
+that can be executed in parallel.
+
+The next example is for illustration purposes, highly unlikely to happen in real life.
+It's just one of the test cases I used to test the algorithm:
+
+For:
+IMAGE_FSTYPES = "i1 i2 i3 i4 i5"
+IMAGE_TYPEDEP_i4 = "i2"
+IMAGE_TYPEDEP_i5 = "i6 i4"
+IMAGE_TYPEDEP_i6 = "i7"
+IMAGE_TYPEDEP_i7 = "i2"
+
+We get the following list of batches that can be executed in parallel, having the
+dependencies satisfied:
+
+[['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']]
+"""
+class ImageDepGraph(object):
+    def __init__(self, d):
+        self.d = d
+        self.graph = dict()
+        self.deps_array = dict()
+
+    def _construct_dep_graph(self, image_fstypes):
+        graph = dict()
+
+        def add_node(node):
+            deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "")
+            if deps != "":
+                graph[node] = deps
+
+                for dep in deps.split():
+                    if not dep in graph:
+                        add_node(dep)
+            else:
+                graph[node] = ""
+
+        for fstype in image_fstypes:
+            add_node(fstype)
+
+        return graph
+
+    def _clean_graph(self):
+        # Live and VMDK images will be processed via inheriting
+        # bbclass and does not get processed here. Remove them from the fstypes
+        # graph. Their dependencies are already added, so no worries here.
+        remove_list = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
+
+        for item in remove_list:
+            self.graph.pop(item, None)
+
+    def _compute_dependencies(self):
+        """
+        returns dict object of nodes with [no_of_depends_on, no_of_depended_by]
+        for each node
+        """
+        deps_array = dict()
+        for node in self.graph:
+            deps_array[node] = [0, 0]
+
+        for node in self.graph:
+            deps = self.graph[node].split()
+            deps_array[node][0] += len(deps)
+            for dep in deps:
+                deps_array[dep][1] += 1
+
+        return deps_array
+
+    def _sort_graph(self):
+        sorted_list = []
+        group = []
+        for node in self.graph:
+            if node not in self.deps_array:
+                continue
+
+            depends_on = self.deps_array[node][0]
+
+            if depends_on == 0:
+                group.append(node)
+
+        if len(group) == 0 and len(self.deps_array) != 0:
+            bb.fatal("possible fstype circular dependency...")
+
+        sorted_list.append(group)
+
+        # remove added nodes from deps_array
+        for item in group:
+            for node in self.graph:
+                if item in self.graph[node]:
+                    self.deps_array[node][0] -= 1
+
+            self.deps_array.pop(item, None)
+
+        if len(self.deps_array):
+            # recursive call, to find the next group
+            sorted_list += self._sort_graph()
+
+        return sorted_list
+
+    def group_fstypes(self, image_fstypes):
+        self.graph = self._construct_dep_graph(image_fstypes)
+
+        self._clean_graph()
+
+        self.deps_array = self._compute_dependencies()
+
+        alltypes = [node for node in self.graph]
+
+        return (alltypes, self._sort_graph())
+
+
+class Image(ImageDepGraph):
+    def __init__(self, d):
+        self.d = d
+
+        super(Image, self).__init__(d)
+
+    def _get_rootfs_size(self):
+        """compute the rootfs size"""
+        rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
+        overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True))
+        rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True))
+        rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
+
+        output = subprocess.check_output(['du', '-ks',
+                                          self.d.getVar('IMAGE_ROOTFS', True)])
+        size_kb = int(output.split()[0])
+        base_size = size_kb * overhead_factor
+        base_size = (base_size, rootfs_req_size)[base_size < rootfs_req_size] + \
+            rootfs_extra_space
+
+        if base_size != int(base_size):
+            base_size = int(base_size + 1)
+
+        base_size += rootfs_alignment - 1
+        base_size -= base_size % rootfs_alignment
+
+        return base_size
+
+    def _create_symlinks(self, subimages):
+        """create symlinks to the newly created image"""
+        deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
+        img_name = self.d.getVar('IMAGE_NAME', True)
+        link_name = self.d.getVar('IMAGE_LINK_NAME', True)
+        manifest_name = self.d.getVar('IMAGE_MANIFEST', True)
+
+        os.chdir(deploy_dir)
+
+        if link_name is not None:
+            for type in subimages:
+                if os.path.exists(img_name + ".rootfs." + type):
+                    dst = link_name + "." + type
+                    src = img_name + ".rootfs." + type
+                    bb.note("Creating symlink: %s -> %s" % (dst, src))
+                    os.symlink(src, dst)
+
+            if manifest_name is not None and \
+                    os.path.exists(manifest_name) and \
+                    not os.path.exists(link_name + ".manifest"):
+                os.symlink(os.path.basename(manifest_name),
+                           link_name + ".manifest")
+
+    def _remove_old_symlinks(self):
+        """remove the symlinks to old binaries"""
+
+        if self.d.getVar('IMAGE_LINK_NAME', True):
+            deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
+            for img in os.listdir(deploy_dir):
+                if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0:
+                    img = os.path.join(deploy_dir, img)
+                    if os.path.islink(img):
+                        if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \
+                                os.path.exists(os.path.realpath(img)):
+                            os.remove(os.path.realpath(img))
+
+                        os.remove(img)
+
+    """
+    This function will just filter out the compressed image types from the
+    fstype groups returning a (filtered_fstype_groups, cimages) tuple.
+    """
+    def _filter_out_commpressed(self, fstype_groups):
+        ctypes = self.d.getVar('COMPRESSIONTYPES', True).split()
+        cimages = {}
+
+        filtered_groups = []
+        for group in fstype_groups:
+            filtered_group = []
+            for type in group:
+                basetype = None
+                for ctype in ctypes:
+                    if type.endswith("." + ctype):
+                        basetype = type[:-len("." + ctype)]
+                        if basetype not in filtered_group:
+                            filtered_group.append(basetype)
+                        if basetype not in cimages:
+                            cimages[basetype] = []
+                        if ctype not in cimages[basetype]:
+                            cimages[basetype].append(ctype)
+                        break
+                if not basetype and type not in filtered_group:
+                    filtered_group.append(type)
+
+            filtered_groups.append(filtered_group)
+
+        return (filtered_groups, cimages)
+
+    def _get_image_types(self):
+        """returns a (types, cimages) tuple"""
+
+        alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split())
+
+        filtered_groups, cimages = self._filter_out_commpressed(fstype_groups)
+
+        return (alltypes, filtered_groups, cimages)
+
+    def _write_script(self, type, cmds):
+        tempdir = self.d.getVar('T', True)
+ type) + + self.d.setVar('img_creation_func', '\n'.join(cmds)) + self.d.setVarFlag('img_creation_func', 'func', 1) + self.d.setVarFlag('img_creation_func', 'fakeroot', 1) + + with open(script_name, "w+") as script: + script.write("%s" % bb.build.shell_trap_code()) + script.write("export ROOTFS_SIZE=%d\n" % self._get_rootfs_size()) + bb.data.emit_func('img_creation_func', script, self.d) + script.write("img_creation_func\n") + + os.chmod(script_name, 0775) + + return script_name + + def _get_imagecmds(self): + old_overrides = self.d.getVar('OVERRIDES', 0) + + alltypes, fstype_groups, cimages = self._get_image_types() + + image_cmd_groups = [] + + bb.note("The image creation groups are: %s" % str(fstype_groups)) + for fstype_group in fstype_groups: + image_cmds = [] + for type in fstype_group: + cmds = [] + subimages = [] + + localdata = bb.data.createCopy(self.d) + localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides)) + bb.data.update_data(localdata) + localdata.setVar('type', type) + + cmds.append("\t" + localdata.getVar("IMAGE_CMD", True)) + cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}")) + + if type in cimages: + for ctype in cimages[type]: + cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True)) + subimages.append(type + "." + ctype) + + if type not in alltypes: + cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}")) + else: + subimages.append(type) + + script_name = self._write_script(type, cmds) + + image_cmds.append((type, subimages, script_name)) + + image_cmd_groups.append(image_cmds) + + return image_cmd_groups + + def create(self): + bb.note("###### Generate images #######") + pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True) + post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True) + + execute_pre_post_process(self.d, pre_process_cmds) + + self._remove_old_symlinks() + + image_cmd_groups = self._get_imagecmds() + + for image_cmds in image_cmd_groups: + # create the images in parallel + nproc = multiprocessing.cpu_count() + pool = bb.utils.multiprocessingpool(nproc) + results = list(pool.imap(generate_image, image_cmds)) + pool.close() + pool.join() + + for result in results: + if result is not None: + bb.fatal(result) + + for image_type, subimages, script in image_cmds: + bb.note("Creating symlinks for %s image ..." % image_type) + self._create_symlinks(subimages) + + execute_pre_post_process(self.d, post_process_cmds) + + +def create_image(d): + Image(d).create() + +if __name__ == "__main__": + """ + Image creation can be called independent from bitbake environment. 
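As an aside, the batching ImageDepGraph computes can be sketched without a
bitbake datastore. A minimal standalone re-implementation of the same
leveling algorithm, fed the hypothetical i1..i7 table from the class
docstring (ordering within a batch may differ; this sketch sorts):

    def group_batches(deps):
        # deps maps each fstype to the list of fstypes it depends on
        remaining = dict((n, set(d)) for n, d in deps.items())
        batches = []
        while remaining:
            # every node with no unsatisfied deps can be built in parallel
            ready = sorted(n for n, d in remaining.items() if not d)
            if not ready:
                raise ValueError("circular fstype dependency")
            batches.append(ready)
            for n in ready:
                del remaining[n]
            for d in remaining.values():
                d.difference_update(ready)
        return batches

    deps = {'i1': [], 'i2': [], 'i3': [], 'i4': ['i2'],
            'i5': ['i6', 'i4'], 'i6': ['i7'], 'i7': ['i2']}
    print(group_batches(deps))
    # [['i1', 'i2', 'i3'], ['i4', 'i7'], ['i6'], ['i5']]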
+ """ + """ + TBD + """ diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py new file mode 100644 index 0000000000..340da61102 --- /dev/null +++ b/meta/lib/oe/license.py @@ -0,0 +1,116 @@ +# vi:sts=4:sw=4:et +"""Code for parsing OpenEmbedded license strings""" + +import ast +import re +from fnmatch import fnmatchcase as fnmatch + +class LicenseError(Exception): + pass + +class LicenseSyntaxError(LicenseError): + def __init__(self, licensestr, exc): + self.licensestr = licensestr + self.exc = exc + LicenseError.__init__(self) + + def __str__(self): + return "error in '%s': %s" % (self.licensestr, self.exc) + +class InvalidLicense(LicenseError): + def __init__(self, license): + self.license = license + LicenseError.__init__(self) + + def __str__(self): + return "invalid characters in license '%s'" % self.license + +license_operator = re.compile('([&|() ])') +license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$') + +class LicenseVisitor(ast.NodeVisitor): + """Syntax tree visitor which can accept OpenEmbedded license strings""" + def visit_string(self, licensestr): + new_elements = [] + elements = filter(lambda x: x.strip(), license_operator.split(licensestr)) + for pos, element in enumerate(elements): + if license_pattern.match(element): + if pos > 0 and license_pattern.match(elements[pos-1]): + new_elements.append('&') + element = '"' + element + '"' + elif not license_operator.match(element): + raise InvalidLicense(element) + new_elements.append(element) + + self.visit(ast.parse(' '.join(new_elements))) + +class FlattenVisitor(LicenseVisitor): + """Flatten a license tree (parsed from a string) by selecting one of each + set of OR options, in the way the user specifies""" + def __init__(self, choose_licenses): + self.choose_licenses = choose_licenses + self.licenses = [] + LicenseVisitor.__init__(self) + + def visit_Str(self, node): + self.licenses.append(node.s) + + def visit_BinOp(self, node): + if isinstance(node.op, ast.BitOr): + left = FlattenVisitor(self.choose_licenses) + left.visit(node.left) + + right = FlattenVisitor(self.choose_licenses) + right.visit(node.right) + + selected = self.choose_licenses(left.licenses, right.licenses) + self.licenses.extend(selected) + else: + self.generic_visit(node) + +def flattened_licenses(licensestr, choose_licenses): + """Given a license string and choose_licenses function, return a flat list of licenses""" + flatten = FlattenVisitor(choose_licenses) + try: + flatten.visit_string(licensestr) + except SyntaxError as exc: + raise LicenseSyntaxError(licensestr, exc) + return flatten.licenses + +def is_included(licensestr, whitelist=None, blacklist=None): + """Given a license string and whitelist and blacklist, determine if the + license string matches the whitelist and does not match the blacklist. 
+ + Returns a tuple holding the boolean state and a list of the applicable + licenses which were excluded (or None, if the state is True) + """ + + def include_license(license): + return any(fnmatch(license, pattern) for pattern in whitelist) + + def exclude_license(license): + return any(fnmatch(license, pattern) for pattern in blacklist) + + def choose_licenses(alpha, beta): + """Select the option in an OR which is the 'best' (has the most + included licenses).""" + alpha_weight = len(filter(include_license, alpha)) + beta_weight = len(filter(include_license, beta)) + if alpha_weight > beta_weight: + return alpha + else: + return beta + + if not whitelist: + whitelist = ['*'] + + if not blacklist: + blacklist = [] + + licenses = flattened_licenses(licensestr, choose_licenses) + excluded = filter(lambda lic: exclude_license(lic), licenses) + included = filter(lambda lic: include_license(lic), licenses) + if excluded: + return False, excluded + else: + return True, included diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py new file mode 100644 index 0000000000..b53f361035 --- /dev/null +++ b/meta/lib/oe/lsb.py @@ -0,0 +1,81 @@ +def release_dict(): + """Return the output of lsb_release -ir as a dictionary""" + from subprocess import PIPE + + try: + output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE) + except bb.process.CmdError as exc: + return None + + data = {} + for line in output.splitlines(): + try: + key, value = line.split(":\t", 1) + except ValueError: + continue + else: + data[key] = value + return data + +def release_dict_file(): + """ Try to gather LSB release information manually when lsb_release tool is unavailable """ + data = None + try: + if os.path.exists('/etc/lsb-release'): + data = {} + with open('/etc/lsb-release') as f: + for line in f: + key, value = line.split("=", 1) + data[key] = value.strip() + elif os.path.exists('/etc/redhat-release'): + data = {} + with open('/etc/redhat-release') as f: + distro = f.readline().strip() + import re + match = re.match(r'(.*) release (.*) \((.*)\)', distro) + if match: + data['DISTRIB_ID'] = match.group(1) + data['DISTRIB_RELEASE'] = match.group(2) + elif os.path.exists('/etc/SuSE-release'): + data = {} + data['DISTRIB_ID'] = 'SUSE LINUX' + with open('/etc/SuSE-release') as f: + for line in f: + if line.startswith('VERSION = '): + data['DISTRIB_RELEASE'] = line[10:].rstrip() + break + elif os.path.exists('/etc/os-release'): + data = {} + with open('/etc/os-release') as f: + for line in f: + if line.startswith('NAME='): + data['DISTRIB_ID'] = line[5:].rstrip().strip('"') + if line.startswith('VERSION_ID='): + data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"') + except IOError: + return None + return data + +def distro_identifier(adjust_hook=None): + """Return a distro identifier string based upon lsb_release -ri, + with optional adjustment via a hook""" + + lsb_data = release_dict() + if lsb_data: + distro_id, release = lsb_data['Distributor ID'], lsb_data['Release'] + else: + lsb_data_file = release_dict_file() + if lsb_data_file: + distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None) + else: + distro_id, release = None, None + + if adjust_hook: + distro_id, release = adjust_hook(distro_id, release) + if not distro_id: + return "Unknown" + if release: + id_str = '{0}-{1}'.format(distro_id, release) + else: + id_str = distro_id + return id_str.replace(' ','-').replace('/','-') diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py new file mode 100644 index 
0000000000..139f333691 --- /dev/null +++ b/meta/lib/oe/maketype.py @@ -0,0 +1,99 @@ +"""OpenEmbedded variable typing support + +Types are defined in the metadata by name, using the 'type' flag on a +variable. Other flags may be utilized in the construction of the types. See +the arguments of the type's factory for details. +""" + +import inspect +import types + +available_types = {} + +class MissingFlag(TypeError): + """A particular flag is required to construct the type, but has not been + provided.""" + def __init__(self, flag, type): + self.flag = flag + self.type = type + TypeError.__init__(self) + + def __str__(self): + return "Type '%s' requires flag '%s'" % (self.type, self.flag) + +def factory(var_type): + """Return the factory for a specified type.""" + if var_type is None: + raise TypeError("No type specified. Valid types: %s" % + ', '.join(available_types)) + try: + return available_types[var_type] + except KeyError: + raise TypeError("Invalid type '%s':\n Valid types: %s" % + (var_type, ', '.join(available_types))) + +def create(value, var_type, **flags): + """Create an object of the specified type, given the specified flags and + string value.""" + obj = factory(var_type) + objflags = {} + for flag in obj.flags: + if flag not in flags: + if flag not in obj.optflags: + raise MissingFlag(flag, var_type) + else: + objflags[flag] = flags[flag] + + return obj(value, **objflags) + +def get_callable_args(obj): + """Grab all but the first argument of the specified callable, returning + the list, as well as a list of which of the arguments have default + values.""" + if type(obj) is type: + obj = obj.__init__ + + args, varargs, keywords, defaults = inspect.getargspec(obj) + flaglist = [] + if args: + if len(args) > 1 and args[0] == 'self': + args = args[1:] + flaglist.extend(args) + + optional = set() + if defaults: + optional |= set(flaglist[-len(defaults):]) + return flaglist, optional + +def factory_setup(name, obj): + """Prepare a factory for use.""" + args, optional = get_callable_args(obj) + extra_args = args[1:] + if extra_args: + obj.flags, optional = extra_args, optional + obj.optflags = set(optional) + else: + obj.flags = obj.optflags = () + + if not hasattr(obj, 'name'): + obj.name = name + +def register(name, factory): + """Register a type, given its name and a factory callable. + + Determines the required and optional flags from the factory's + arguments.""" + factory_setup(name, factory) + available_types[factory.name] = factory + + +# Register all our included types +for name in dir(types): + if name.startswith('_'): + continue + + obj = getattr(types, name) + if not callable(obj): + continue + + register(name, obj) diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py new file mode 100644 index 0000000000..afda76be66 --- /dev/null +++ b/meta/lib/oe/manifest.py @@ -0,0 +1,345 @@ +from abc import ABCMeta, abstractmethod +import os +import re +import bb + + +class Manifest(object): + """ + This is an abstract class. Do not instantiate this directly. 
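A note on the maketype module above: its factory protocol can be
exercised with a custom type. The 'path' factory below is purely
illustrative, not one of the stock registrations:

    def path(value, relativeto=''):
        import os.path
        return os.path.join(relativeto, value)

    register('path', path)
    # 'relativeto' is detected as an optional flag from its default value
    create('etc/fstab', 'path', relativeto='/mnt')   # -> '/mnt/etc/fstab'
    create('etc/fstab', 'path')                      # -> 'etc/fstab'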
+ """ + __metaclass__ = ABCMeta + + PKG_TYPE_MUST_INSTALL = "mip" + PKG_TYPE_MULTILIB = "mlp" + PKG_TYPE_LANGUAGE = "lgp" + PKG_TYPE_ATTEMPT_ONLY = "aop" + + MANIFEST_TYPE_IMAGE = "image" + MANIFEST_TYPE_SDK_HOST = "sdk_host" + MANIFEST_TYPE_SDK_TARGET = "sdk_target" + + var_maps = { + MANIFEST_TYPE_IMAGE: { + "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL, + "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY, + "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE + }, + MANIFEST_TYPE_SDK_HOST: { + "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL, + "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY + }, + MANIFEST_TYPE_SDK_TARGET: { + "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL, + "TOOLCHAIN_TARGET_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY + } + } + + INSTALL_ORDER = [ + PKG_TYPE_LANGUAGE, + PKG_TYPE_MUST_INSTALL, + PKG_TYPE_ATTEMPT_ONLY, + PKG_TYPE_MULTILIB + ] + + initial_manifest_file_header = \ + "# This file was generated automatically and contains the packages\n" \ + "# passed on to the package manager in order to create the rootfs.\n\n" \ + "# Format:\n" \ + "# ,\n" \ + "# where:\n" \ + "# can be:\n" \ + "# 'mip' = must install package\n" \ + "# 'aop' = attempt only package\n" \ + "# 'mlp' = multilib package\n" \ + "# 'lgp' = language package\n\n" + + def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE): + self.d = d + self.manifest_type = manifest_type + + if manifest_dir is None: + if manifest_type != self.MANIFEST_TYPE_IMAGE: + self.manifest_dir = self.d.getVar('SDK_DIR', True) + else: + self.manifest_dir = self.d.getVar('WORKDIR', True) + else: + self.manifest_dir = manifest_dir + + bb.utils.mkdirhier(self.manifest_dir) + + self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type) + self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type) + self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type) + + # packages in the following vars will be split in 'must install' and + # 'multilib' + self.vars_to_split = ["PACKAGE_INSTALL", + "TOOLCHAIN_HOST_TASK", + "TOOLCHAIN_TARGET_TASK"] + + """ + This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk). + This will be used for testing until the class is implemented properly! 
+ """ + def _create_dummy_initial(self): + image_rootfs = self.d.getVar('IMAGE_ROOTFS', True) + pkg_list = dict() + if image_rootfs.find("core-image-sato-sdk") > 0: + pkg_list[self.PKG_TYPE_MUST_INSTALL] = \ + "packagegroup-core-x11-sato-games packagegroup-base-extended " \ + "packagegroup-core-x11-sato packagegroup-core-x11-base " \ + "packagegroup-core-sdk packagegroup-core-tools-debug " \ + "packagegroup-core-boot packagegroup-core-tools-testapps " \ + "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \ + "apt packagegroup-core-tools-profile psplash " \ + "packagegroup-core-standalone-sdk-target " \ + "packagegroup-core-ssh-openssh dpkg kernel-dev" + pkg_list[self.PKG_TYPE_LANGUAGE] = \ + "locale-base-en-us locale-base-en-gb" + elif image_rootfs.find("core-image-sato") > 0: + pkg_list[self.PKG_TYPE_MUST_INSTALL] = \ + "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \ + "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \ + "packagegroup-core-x11-sato packagegroup-core-boot" + pkg_list['lgp'] = \ + "locale-base-en-us locale-base-en-gb" + elif image_rootfs.find("core-image-minimal") > 0: + pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot" + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for pkg_type in pkg_list: + for pkg in pkg_list[pkg_type].split(): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + """ + This will create the initial manifest which will be used by Rootfs class to + generate the rootfs + """ + @abstractmethod + def create_initial(self): + pass + + """ + This creates the manifest after everything has been installed. + """ + @abstractmethod + def create_final(self): + pass + + """ + This creates the manifest after the package in initial manifest has been + dummy installed. It lists all *to be installed* packages. There is no real + installation, just a test. + """ + @abstractmethod + def create_full(self, pm): + pass + + """ + The following function parses an initial manifest and returns a dictionary + object with the must install, attempt only, multilib and language packages. + """ + def parse_initial_manifest(self): + pkgs = dict() + + with open(self.initial_manifest) as manifest: + for line in manifest.read().split('\n'): + comment = re.match("^#.*", line) + pattern = "^(%s|%s|%s|%s),(.*)$" % \ + (self.PKG_TYPE_MUST_INSTALL, + self.PKG_TYPE_ATTEMPT_ONLY, + self.PKG_TYPE_MULTILIB, + self.PKG_TYPE_LANGUAGE) + pkg = re.match(pattern, line) + + if comment is not None: + continue + + if pkg is not None: + pkg_type = pkg.group(1) + pkg_name = pkg.group(2) + + if not pkg_type in pkgs: + pkgs[pkg_type] = [pkg_name] + else: + pkgs[pkg_type].append(pkg_name) + + return pkgs + + ''' + This following function parses a full manifest and return a list + object with packages. + ''' + def parse_full_manifest(self): + installed_pkgs = list() + if not os.path.exists(self.full_manifest): + bb.note('full manifest not exist') + return installed_pkgs + + with open(self.full_manifest, 'r') as manifest: + for pkg in manifest.read().split('\n'): + installed_pkgs.append(pkg.strip()) + + return installed_pkgs + + +class RpmManifest(Manifest): + """ + Returns a dictionary object with mip and mlp packages. 
+ """ + def _split_multilib(self, pkg_list): + pkgs = dict() + + for pkg in pkg_list.split(): + pkg_type = self.PKG_TYPE_MUST_INSTALL + + ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split() + + for ml_variant in ml_variants: + if pkg.startswith(ml_variant + '-'): + pkg_type = self.PKG_TYPE_MULTILIB + + if not pkg_type in pkgs: + pkgs[pkg_type] = pkg + else: + pkgs[pkg_type] += " " + pkg + + return pkgs + + def create_initial(self): + pkgs = dict() + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + if var in self.vars_to_split: + split_pkgs = self._split_multilib(self.d.getVar(var, True)) + if split_pkgs is not None: + pkgs = dict(pkgs.items() + split_pkgs.items()) + else: + pkg_list = self.d.getVar(var, True) + if pkg_list is not None: + pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True) + + for pkg_type in pkgs: + for pkg in pkgs[pkg_type].split(): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + pass + + +class OpkgManifest(Manifest): + """ + Returns a dictionary object with mip and mlp packages. + """ + def _split_multilib(self, pkg_list): + pkgs = dict() + + for pkg in pkg_list.split(): + pkg_type = self.PKG_TYPE_MUST_INSTALL + + ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split() + + for ml_variant in ml_variants: + if pkg.startswith(ml_variant + '-'): + pkg_type = self.PKG_TYPE_MULTILIB + + if not pkg_type in pkgs: + pkgs[pkg_type] = pkg + else: + pkgs[pkg_type] += " " + pkg + + return pkgs + + def create_initial(self): + pkgs = dict() + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + if var in self.vars_to_split: + split_pkgs = self._split_multilib(self.d.getVar(var, True)) + if split_pkgs is not None: + pkgs = dict(pkgs.items() + split_pkgs.items()) + else: + pkg_list = self.d.getVar(var, True) + if pkg_list is not None: + pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True) + + for pkg_type in pkgs: + for pkg in pkgs[pkg_type].split(): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + if not os.path.exists(self.initial_manifest): + self.create_initial() + + initial_manifest = self.parse_initial_manifest() + pkgs_to_install = list() + for pkg_type in initial_manifest: + pkgs_to_install += initial_manifest[pkg_type] + if len(pkgs_to_install) == 0: + return + + output = pm.dummy_install(pkgs_to_install) + + with open(self.full_manifest, 'w+') as manifest: + pkg_re = re.compile('^Installing ([^ ]+) [^ ].*') + for line in set(output.split('\n')): + m = pkg_re.match(line) + if m: + manifest.write(m.group(1) + '\n') + + return + + +class DpkgManifest(Manifest): + def create_initial(self): + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + pkg_list = self.d.getVar(var, True) + + if pkg_list is None: + continue + + for pkg in pkg_list.split(): + manifest.write("%s,%s\n" % + (self.var_maps[self.manifest_type][var], pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + pass + + +def create_manifest(d, final_manifest=False, manifest_dir=None, + manifest_type=Manifest.MANIFEST_TYPE_IMAGE): + manifest_map = {'rpm': RpmManifest, + 'ipk': OpkgManifest, + 'deb': 
DpkgManifest} + + manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type) + + if final_manifest: + manifest.create_final() + else: + manifest.create_initial() + + +if __name__ == "__main__": + pass diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py new file mode 100644 index 0000000000..f8b532220a --- /dev/null +++ b/meta/lib/oe/package.py @@ -0,0 +1,99 @@ +def runstrip(arg): + # Function to strip a single file, called from split_and_strip_files below + # A working 'file' (one which works on the target architecture) + # + # The elftype is a bit pattern (explained in split_and_strip_files) to tell + # us what type of file we're processing... + # 4 - executable + # 8 - shared library + # 16 - kernel module + + import commands, stat, subprocess + + (file, elftype, strip) = arg + + newmode = None + if not os.access(file, os.W_OK) or os.access(file, os.R_OK): + origmode = os.stat(file)[stat.ST_MODE] + newmode = origmode | stat.S_IWRITE | stat.S_IREAD + os.chmod(file, newmode) + + extraflags = "" + + # kernel module + if elftype & 16: + extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates" + # .so and shared library + elif ".so" in file and elftype & 8: + extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded" + # shared or executable: + elif elftype & 8 or elftype & 4: + extraflags = "--remove-section=.comment --remove-section=.note" + + stripcmd = "'%s' %s '%s'" % (strip, extraflags, file) + bb.debug(1, "runstrip: %s" % stripcmd) + + ret = subprocess.call(stripcmd, shell=True) + + if newmode: + os.chmod(file, origmode) + + if ret: + bb.error("runstrip: '%s' strip command failed" % stripcmd) + + return + + +def file_translate(file): + ft = file.replace("@", "@at@") + ft = ft.replace(" ", "@space@") + ft = ft.replace("\t", "@tab@") + ft = ft.replace("[", "@openbrace@") + ft = ft.replace("]", "@closebrace@") + ft = ft.replace("_", "@underscore@") + return ft + +def filedeprunner(arg): + import re, subprocess, shlex + + (pkg, pkgfiles, rpmdeps, pkgdest) = arg + provides = {} + requires = {} + + r = re.compile(r'[<>=]+ +[^ ]*') + + def process_deps(pipe, pkg, pkgdest, provides, requires): + for line in pipe: + f = line.split(" ", 1)[0].strip() + line = line.split(" ", 1)[1].strip() + + if line.startswith("Requires:"): + i = requires + elif line.startswith("Provides:"): + i = provides + else: + continue + + file = f.replace(pkgdest + "/" + pkg, "") + file = file_translate(file) + value = line.split(":", 1)[1].strip() + value = r.sub(r'(\g<0>)', value) + + if value.startswith("rpmlib("): + continue + if value == "python": + continue + if file not in i: + i[file] = [] + i[file].append(value) + + return provides, requires + + try: + dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE) + provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires) + except OSError as e: + bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e)) + raise e + + return (pkg, provides, requires) diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py new file mode 100644 index 0000000000..a8360fe983 --- /dev/null +++ b/meta/lib/oe/package_manager.py @@ -0,0 +1,1721 @@ +from abc import ABCMeta, abstractmethod +import os +import glob +import subprocess +import shutil +import multiprocessing +import re +import bb + + +# this can be used by all PM backends to create the index files in parallel +def 
create_index(arg): + index_cmd = arg + + try: + bb.note("Executing '%s' ..." % index_cmd) + subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + return("Index creation command '%s' failed with return code %d:\n%s" % + (e.cmd, e.returncode, e.output)) + + return None + + +class Indexer(object): + __metaclass__ = ABCMeta + + def __init__(self, d, deploy_dir): + self.d = d + self.deploy_dir = deploy_dir + + @abstractmethod + def write_index(self): + pass + + +class RpmIndexer(Indexer): + def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None): + package_archs = { + 'default': [], + } + + target_os = { + 'default': "", + } + + if arch_var is not None and os_var is not None: + package_archs['default'] = self.d.getVar(arch_var, True).split() + package_archs['default'].reverse() + target_os['default'] = self.d.getVar(os_var, True).strip() + else: + package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split() + # arch order is reversed. This ensures the -best- match is + # listed first! + package_archs['default'].reverse() + target_os['default'] = self.d.getVar("TARGET_OS", True).strip() + multilibs = self.d.getVar('MULTILIBS', True) or "" + for ext in multilibs.split(): + eext = ext.split(':') + if len(eext) > 1 and eext[0] == 'multilib': + localdata = bb.data.createCopy(self.d) + default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1] + default_tune = localdata.getVar(default_tune_key, False) + if default_tune: + localdata.setVar("DEFAULTTUNE", default_tune) + bb.data.update_data(localdata) + package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS', + True).split() + package_archs[eext[1]].reverse() + target_os[eext[1]] = localdata.getVar("TARGET_OS", + True).strip() + + ml_prefix_list = dict() + for mlib in package_archs: + if mlib == 'default': + ml_prefix_list[mlib] = package_archs[mlib] + else: + ml_prefix_list[mlib] = list() + for arch in package_archs[mlib]: + if arch in ['all', 'noarch', 'any']: + ml_prefix_list[mlib].append(arch) + else: + ml_prefix_list[mlib].append(mlib + "_" + arch) + + return (ml_prefix_list, target_os) + + def write_index(self): + sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split() + all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split() + + mlb_prefix_list = self.get_ml_prefix_and_os_list()[0] + + archs = set() + for item in mlb_prefix_list: + archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item])) + + if len(archs) == 0: + archs = archs.union(set(all_mlb_pkg_archs)) + + archs = archs.union(set(sdk_pkg_archs)) + + rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo") + index_cmds = [] + rpm_dirs_found = False + for arch in archs: + arch_dir = os.path.join(self.deploy_dir, arch) + if not os.path.isdir(arch_dir): + continue + + index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir)) + + rpm_dirs_found = True + + if not rpm_dirs_found: + bb.note("There are no packages in %s" % self.deploy_dir) + return + + nproc = multiprocessing.cpu_count() + pool = bb.utils.multiprocessingpool(nproc) + results = list(pool.imap(create_index, index_cmds)) + pool.close() + pool.join() + + for result in results: + if result is not None: + return(result) + + +class OpkgIndexer(Indexer): + def write_index(self): + arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS", + "SDK_PACKAGE_ARCHS", + "MULTILIB_ARCHS"] + + opkg_index_cmd = bb.utils.which(os.getenv('PATH'), 
"opkg-make-index") + + if not os.path.exists(os.path.join(self.deploy_dir, "Packages")): + open(os.path.join(self.deploy_dir, "Packages"), "w").close() + + index_cmds = [] + for arch_var in arch_vars: + archs = self.d.getVar(arch_var, True) + if archs is None: + continue + + for arch in archs.split(): + pkgs_dir = os.path.join(self.deploy_dir, arch) + pkgs_file = os.path.join(pkgs_dir, "Packages") + + if not os.path.isdir(pkgs_dir): + continue + + if not os.path.exists(pkgs_file): + open(pkgs_file, "w").close() + + index_cmds.append('%s -r %s -p %s -m %s' % + (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir)) + + if len(index_cmds) == 0: + bb.note("There are no packages in %s!" % self.deploy_dir) + return + + nproc = multiprocessing.cpu_count() + pool = bb.utils.multiprocessingpool(nproc) + results = list(pool.imap(create_index, index_cmds)) + pool.close() + pool.join() + + for result in results: + if result is not None: + return(result) + + +class DpkgIndexer(Indexer): + def write_index(self): + pkg_archs = self.d.getVar('PACKAGE_ARCHS', True) + if pkg_archs is not None: + arch_list = pkg_archs.split() + sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True) + if sdk_pkg_archs is not None: + for a in sdk_pkg_archs.split(): + if a not in pkg_archs: + arch_list.append(a) + + apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive") + gzip = bb.utils.which(os.getenv('PATH'), "gzip") + + index_cmds = [] + deb_dirs_found = False + for arch in arch_list: + arch_dir = os.path.join(self.deploy_dir, arch) + if not os.path.isdir(arch_dir): + continue + + cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive) + + cmd += "%s -fc Packages > Packages.gz;" % gzip + + with open(os.path.join(arch_dir, "Release"), "w+") as release: + release.write("Label: %s\n" % arch) + + cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive + + index_cmds.append(cmd) + + deb_dirs_found = True + + if not deb_dirs_found: + bb.note("There are no packages in %s" % self.deploy_dir) + return + + nproc = multiprocessing.cpu_count() + pool = bb.utils.multiprocessingpool(nproc) + results = list(pool.imap(create_index, index_cmds)) + pool.close() + pool.join() + + for result in results: + if result is not None: + return(result) + + +class PkgsList(object): + __metaclass__ = ABCMeta + + def __init__(self, d, rootfs_dir): + self.d = d + self.rootfs_dir = rootfs_dir + + @abstractmethod + def list(self, format=None): + pass + + +class RpmPkgsList(PkgsList): + def __init__(self, d, rootfs_dir, arch_var=None, os_var=None): + super(RpmPkgsList, self).__init__(d, rootfs_dir) + + self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm") + self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm') + + self.ml_prefix_list, self.ml_os_list = \ + RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var) + + ''' + Translate the RPM/Smart format names to the OE multilib format names + ''' + def _pkg_translate_smart_to_oe(self, pkg, arch): + new_pkg = pkg + fixed_arch = arch.replace('_', '-') + found = 0 + for mlib in self.ml_prefix_list: + for cmp_arch in self.ml_prefix_list[mlib]: + fixed_cmp_arch = cmp_arch.replace('_', '-') + if fixed_arch == fixed_cmp_arch: + if mlib == 'default': + new_pkg = pkg + new_arch = cmp_arch + else: + new_pkg = mlib + '-' + pkg + # We need to strip off the ${mlib}_ prefix on the arch + new_arch = cmp_arch.replace(mlib + '_', '') + + # Workaround for bug 3565. Simply look to see if we + # know of a package with that name, if not try again! 
+ filename = os.path.join(self.d.getVar('PKGDATA_DIR', True), + 'runtime-reverse', + new_pkg) + if os.path.exists(filename): + found = 1 + break + + if found == 1 and fixed_arch == fixed_cmp_arch: + break + #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch)) + return new_pkg, new_arch + + def _list_pkg_deps(self): + cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"), + "-t", self.image_rpmlib] + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip() + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the package dependencies. Command '%s' " + "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output)) + + return output + + def list(self, format=None): + if format == "deps": + return self._list_pkg_deps() + + cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir + cmd += ' -D "_dbpath /var/lib/rpm" -qa' + cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'" + + try: + # bb.note(cmd) + tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() + + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the installed packages list. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + output = list() + for line in tmp_output.split('\n'): + if len(line.strip()) == 0: + continue + pkg = line.split()[0] + arch = line.split()[1] + ver = line.split()[2] + pkgorigin = line.split()[3] + new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch) + + if format == "arch": + output.append('%s %s' % (new_pkg, new_arch)) + elif format == "file": + output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch)) + elif format == "ver": + output.append('%s %s %s' % (new_pkg, new_arch, ver)) + else: + output.append('%s' % (new_pkg)) + + output.sort() + + return '\n'.join(output) + + +class OpkgPkgsList(PkgsList): + def __init__(self, d, rootfs_dir, config_file): + super(OpkgPkgsList, self).__init__(d, rootfs_dir) + + self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl") + self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir) + self.opkg_args += self.d.getVar("OPKG_ARGS", True) + + def list(self, format=None): + opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py") + + if format == "arch": + cmd = "%s %s status | %s -a" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "file": + cmd = "%s %s status | %s -f" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "ver": + cmd = "%s %s status | %s -v" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "deps": + cmd = "%s %s status | %s" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + else: + cmd = "%s %s list_installed | cut -d' ' -f1" % \ + (self.opkg_cmd, self.opkg_args) + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the installed packages list. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + if output and format == "file": + tmp_output = "" + for line in output.split('\n'): + pkg, pkg_file, pkg_arch = line.split() + full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file) + if os.path.exists(full_path): + tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch) + else: + tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch) + + output = tmp_output + + return output + + +class DpkgPkgsList(PkgsList): + def list(self, format=None): + cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"), + "--admindir=%s/var/lib/dpkg" % self.rootfs_dir, + "-W"] + + if format == "arch": + cmd.append("-f=${Package} ${PackageArch}\n") + elif format == "file": + cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n") + elif format == "ver": + cmd.append("-f=${Package} ${PackageArch} ${Version}\n") + elif format == "deps": + cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n") + else: + cmd.append("-f=${Package}\n") + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip() + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the installed packages list. Command '%s' " + "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output)) + + if format == "file": + tmp_output = "" + for line in tuple(output.split('\n')): + pkg, pkg_file, pkg_arch = line.split() + full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file) + if os.path.exists(full_path): + tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch) + else: + tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch) + + output = tmp_output + elif format == "deps": + opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py") + + try: + output = subprocess.check_output("echo -e '%s' | %s" % + (output, opkg_query_cmd), + stderr=subprocess.STDOUT, + shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Cannot compute packages dependencies. Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + return output + + +class PackageManager(object): + """ + This is an abstract class. Do not instantiate this directly. + """ + __metaclass__ = ABCMeta + + def __init__(self, d): + self.d = d + self.deploy_dir = None + self.deploy_lock = None + self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or "" + + """ + Update the package manager package database. + """ + @abstractmethod + def update(self): + pass + + """ + Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is + True, installation failures are ignored. + """ + @abstractmethod + def install(self, pkgs, attempt_only=False): + pass + + """ + Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies' + is False, the any dependencies are left in place. + """ + @abstractmethod + def remove(self, pkgs, with_dependencies=True): + pass + + """ + This function creates the index files + """ + @abstractmethod + def write_index(self): + pass + + @abstractmethod + def remove_packaging_data(self): + pass + + @abstractmethod + def list_installed(self, format=None): + pass + + @abstractmethod + def insert_feeds_uris(self): + pass + + """ + Install complementary packages based upon the list of currently installed + packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install + these packages, if they don't exist then no error will occur. 
Note: every + backend needs to call this function explicitly after the normal package + installation + """ + def install_complementary(self, globs=None): + # we need to write the list of installed packages to a file because the + # oe-pkgdata-util reads it from a file + installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True), + "installed_pkgs.txt") + with open(installed_pkgs_file, "w+") as installed_pkgs: + installed_pkgs.write(self.list_installed("arch")) + + if globs is None: + globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True) + split_linguas = set() + + for translation in self.d.getVar('IMAGE_LINGUAS', True).split(): + split_linguas.add(translation) + split_linguas.add(translation.split('-')[0]) + + split_linguas = sorted(split_linguas) + + for lang in split_linguas: + globs += " *-locale-%s" % lang + + if globs is None: + return + + cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"), + "glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file, + globs] + try: + bb.note("Installing complementary packages ...") + complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Could not compute complementary packages list. Command " + "'%s' returned %d:\n%s" % + (' '.join(cmd), e.returncode, e.output)) + + self.install(complementary_pkgs.split(), attempt_only=True) + + def deploy_dir_lock(self): + if self.deploy_dir is None: + raise RuntimeError("deploy_dir is not set!") + + lock_file_name = os.path.join(self.deploy_dir, "deploy.lock") + + self.deploy_lock = bb.utils.lockfile(lock_file_name) + + def deploy_dir_unlock(self): + if self.deploy_lock is None: + return + + bb.utils.unlockfile(self.deploy_lock) + + self.deploy_lock = None + + +class RpmPM(PackageManager): + def __init__(self, + d, + target_rootfs, + target_vendor, + task_name='target', + providename=None, + arch_var=None, + os_var=None): + super(RpmPM, self).__init__(d) + self.target_rootfs = target_rootfs + self.target_vendor = target_vendor + self.task_name = task_name + self.providename = providename + self.fullpkglist = list() + self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True) + self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm") + self.install_dir = os.path.join(self.target_rootfs, "install") + self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm") + self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart") + self.smart_opt = "--data-dir=" + os.path.join(target_rootfs, + 'var/lib/smart') + self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper') + self.solution_manifest = self.d.expand('${T}/saved/%s_solution' % + self.task_name) + self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name) + self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm') + + if not os.path.exists(self.d.expand('${T}/saved')): + bb.utils.mkdirhier(self.d.expand('${T}/saved')) + + self.indexer = RpmIndexer(self.d, self.deploy_dir) + self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var) + + self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var) + + def insert_feeds_uris(self): + if self.feed_uris == "": + return + + # List must be prefered to least preferred order + default_platform_extra = set() + platform_extra = set() + bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" + for mlib in self.ml_os_list: + for arch in self.ml_prefix_list[mlib]: + plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] + if mlib == 
bbextendvariant: + default_platform_extra.add(plt) + else: + platform_extra.add(plt) + + platform_extra = platform_extra.union(default_platform_extra) + + arch_list = [] + for canonical_arch in platform_extra: + arch = canonical_arch.split('-')[0] + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + + uri_iterator = 0 + channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list) + + for uri in self.feed_uris.split(): + for arch in arch_list: + bb.note('Note: adding Smart channel url%d%s (%s)' % + (uri_iterator, arch, channel_priority)) + self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y' + % (uri_iterator, arch, uri, arch)) + self._invoke_smart('channel --set url%d-%s priority=%d' % + (uri_iterator, arch, channel_priority)) + channel_priority -= 5 + uri_iterator += 1 + + ''' + Create configs for rpm and smart, and multilib is supported + ''' + def create_configs(self): + target_arch = self.d.getVar('TARGET_ARCH', True) + platform = '%s%s-%s' % (target_arch.replace('-', '_'), + self.target_vendor, + self.ml_os_list['default']) + + # List must be prefered to least preferred order + default_platform_extra = list() + platform_extra = list() + bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" + for mlib in self.ml_os_list: + for arch in self.ml_prefix_list[mlib]: + plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] + if mlib == bbextendvariant: + if plt not in default_platform_extra: + default_platform_extra.append(plt) + else: + if plt not in platform_extra: + platform_extra.append(plt) + platform_extra = default_platform_extra + platform_extra + + self._create_configs(platform, platform_extra) + + def _invoke_smart(self, args): + cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args) + # bb.note(cmd) + try: + complementary_pkgs = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + shell=True) + # bb.note(complementary_pkgs) + return complementary_pkgs + except subprocess.CalledProcessError as e: + bb.fatal("Could not invoke smart. Command " + "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output)) + + def _search_pkg_name_in_feeds(self, pkg, feed_archs): + for arch in feed_archs: + arch = arch.replace('-', '_') + for p in self.fullpkglist: + regex_match = r"^%s-[^-]*-[^-]*@%s$" % \ + (re.escape(pkg), re.escape(arch)) + if re.match(regex_match, p) is not None: + # First found is best match + # bb.note('%s -> %s' % (pkg, pkg + '@' + arch)) + return pkg + '@' + arch + + return "" + + ''' + Translate the OE multilib format names to the RPM/Smart format names + It searched the RPM/Smart format names in probable multilib feeds first, + and then searched the default base feed. + ''' + def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False): + new_pkgs = list() + + for pkg in pkgs: + new_pkg = pkg + # Search new_pkg in probable multilibs first + for mlib in self.ml_prefix_list: + # Jump the default archs + if mlib == 'default': + continue + + subst = pkg.replace(mlib + '-', '') + # if the pkg in this multilib feed + if subst != pkg: + feed_archs = self.ml_prefix_list[mlib] + new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs) + if not new_pkg: + # Failed to translate, package not found! + err_msg = '%s not found in the %s feeds (%s).\n' % \ + (pkg, mlib, " ".join(feed_archs)) + if not attempt_only: + err_msg += " ".join(self.fullpkglist) + bb.fatal(err_msg) + bb.warn(err_msg) + else: + new_pkgs.append(new_pkg) + + break + + # Apparently not a multilib package... 
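# Illustrative walk-through (hypothetical feed entry): for
# pkg='lib32-bash', the 'lib32-' prefix is stripped and 'bash' is
# searched against the lib32 feed archs; a fullpkglist entry such as
# 'bash-4.2-r1@lib32_x86' then gives the smart name 'bash@lib32_x86'.
# A name with no multilib prefix falls through to the default archs
# below.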
+ if pkg == new_pkg: + # Search new_pkg in default archs + default_archs = self.ml_prefix_list['default'] + new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs) + if not new_pkg: + err_msg = '%s not found in the base feeds (%s).\n' % \ + (pkg, ' '.join(default_archs)) + if not attempt_only: + err_msg += " ".join(self.fullpkglist) + bb.fatal(err_msg) + bb.warn(err_msg) + else: + new_pkgs.append(new_pkg) + + return new_pkgs + + def _create_configs(self, platform, platform_extra): + # Setup base system configuration + bb.note("configuring RPM platform settings") + + # Configure internal RPM environment when using Smart + os.environ['RPM_ETCRPM'] = self.etcrpm_dir + bb.utils.mkdirhier(self.etcrpm_dir) + + # Setup temporary directory -- install... + if os.path.exists(self.install_dir): + bb.utils.remove(self.install_dir, True) + bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp')) + + channel_priority = 5 + platform_dir = os.path.join(self.etcrpm_dir, "platform") + with open(platform_dir, "w+") as platform_fd: + platform_fd.write(platform + '\n') + for pt in platform_extra: + channel_priority += 5 + platform_fd.write(re.sub("-linux.*$", "-linux.*\n", pt)) + + # Tell RPM that the "/" directory exist and is available + bb.note("configuring RPM system provides") + sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo") + bb.utils.mkdirhier(sysinfo_dir) + with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames: + dirnames.write("/\n") + + if self.providename: + providename_dir = os.path.join(sysinfo_dir, "Providename") + if not os.path.exists(providename_dir): + providename_content = '\n'.join(self.providename) + providename_content += '\n' + open(providename_dir, "w+").write(providename_content) + + # Configure RPM... we enforce these settings! + bb.note("configuring RPM DB settings") + # After change the __db.* cache size, log file will not be + # generated automatically, that will raise some warnings, + # so touch a bare log for rpm write into it. + rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001') + if not os.path.exists(rpmlib_log): + bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log')) + open(rpmlib_log, 'w+').close() + + DB_CONFIG_CONTENT = "# ================ Environment\n" \ + "set_data_dir .\n" \ + "set_create_dir .\n" \ + "set_lg_dir ./log\n" \ + "set_tmp_dir ./tmp\n" \ + "set_flags db_log_autoremove on\n" \ + "\n" \ + "# -- thread_count must be >= 8\n" \ + "set_thread_count 64\n" \ + "\n" \ + "# ================ Logging\n" \ + "\n" \ + "# ================ Memory Pool\n" \ + "set_cachesize 0 1048576 0\n" \ + "set_mp_mmapsize 268435456\n" \ + "\n" \ + "# ================ Locking\n" \ + "set_lk_max_locks 16384\n" \ + "set_lk_max_lockers 16384\n" \ + "set_lk_max_objects 16384\n" \ + "mutex_set_max 163840\n" \ + "\n" \ + "# ================ Replication\n" + + db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG') + if not os.path.exists(db_config_dir): + open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT) + + # Create database so that smart doesn't complain (lazy init) + cmd = "%s --root %s --dbpath /var/lib/rpm -qa > /dev/null" % ( + self.rpm_cmd, + self.target_rootfs) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Create rpm database failed. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + # Configure smart + bb.note("configuring Smart settings") + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), + True) + self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs) + self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm') + self._invoke_smart('config --set rpm-extra-macros._var=%s' % + self.d.getVar('localstatedir', True)) + cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp' + self._invoke_smart(cmd) + + # Write common configuration for host and target usage + self._invoke_smart('config --set rpm-nolinktos=1') + self._invoke_smart('config --set rpm-noparentdirs=1') + for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): + self._invoke_smart('flag --set ignore-recommends %s' % i) + + # Do the following configurations here, to avoid them being + # saved for field upgrade + if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1": + self._invoke_smart('config --set ignore-all-recommends=1') + pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or "" + for i in pkg_exclude.split(): + self._invoke_smart('flag --set exclude-packages %s' % i) + + # Optional debugging + # self._invoke_smart('config --set rpm-log-level=debug') + # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile' + # self._invoke_smart(cmd) + ch_already_added = [] + for canonical_arch in platform_extra: + arch = canonical_arch.split('-')[0] + arch_channel = os.path.join(self.deploy_dir, arch) + if os.path.exists(arch_channel) and not arch in ch_already_added: + bb.note('Note: adding Smart channel %s (%s)' % + (arch, channel_priority)) + self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y' + % (arch, arch_channel)) + self._invoke_smart('channel --set %s priority=%d' % + (arch, channel_priority)) + channel_priority -= 5 + + ch_already_added.append(arch) + + bb.note('adding Smart RPM DB channel') + self._invoke_smart('channel --add rpmsys type=rpm-sys -y') + + # Construct install scriptlet wrapper. + # Scripts need to be ordered when executed, this ensures numeric order. + # If we ever run into needing more the 899 scripts, we'll have to. + # change num to start with 1000. + # + SCRIPTLET_FORMAT = "#!/bin/bash\n" \ + "\n" \ + "export PATH=%s\n" \ + "export D=%s\n" \ + 'export OFFLINE_ROOT="$D"\n' \ + 'export IPKG_OFFLINE_ROOT="$D"\n' \ + 'export OPKG_OFFLINE_ROOT="$D"\n' \ + "export INTERCEPT_DIR=%s\n" \ + "export NATIVE_ROOT=%s\n" \ + "\n" \ + "$2 $1/$3 $4\n" \ + "if [ $? 
-ne 0 ]; then\n" \ + " if [ $4 -eq 1 ]; then\n" \ + " mkdir -p $1/etc/rpm-postinsts\n" \ + " num=100\n" \ + " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \ + " name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \ + ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \ + ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \ + " cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \ + " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \ + " else\n" \ + ' echo "Error: pre/post remove scriptlet failed"\n' \ + " fi\n" \ + "fi\n" + + intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts') + native_root = self.d.getVar('STAGING_DIR_NATIVE', True) + scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'], + self.target_rootfs, + intercept_dir, + native_root) + open(self.scriptlet_wrapper, 'w+').write(scriptlet_content) + + bb.note("Note: configuring RPM cross-install scriptlet_wrapper") + os.chmod(self.scriptlet_wrapper, 0755) + cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \ + self.scriptlet_wrapper + self._invoke_smart(cmd) + + # Debug to show smart config info + # bb.note(self._invoke_smart('config --show')) + + def update(self): + self._invoke_smart('update rpmsys') + + ''' + Install pkgs with smart, the pkg name is oe format + ''' + def install(self, pkgs, attempt_only=False): + + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + if attempt_only and len(pkgs) == 0: + return + pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only) + + if not attempt_only: + bb.note('to be installed: %s' % ' '.join(pkgs)) + cmd = "%s %s install -y %s" % \ + (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) + bb.note(cmd) + else: + bb.note('installing attempt only packages...') + bb.note('Attempting %s' % ' '.join(pkgs)) + cmd = "%s %s install --attempt -y %s" % \ + (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) + try: + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to install packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + ''' + Remove pkgs with smart, the pkg name is smart/rpm format + ''' + def remove(self, pkgs, with_dependencies=True): + bb.note('to be removed: ' + ' '.join(pkgs)) + + if not with_dependencies: + cmd = "%s -e --nodeps " % self.rpm_cmd + cmd += "--root=%s " % self.target_rootfs + cmd += "--dbpath=/var/lib/rpm " + cmd += "--define='_cross_scriptlet_wrapper %s' " % \ + self.scriptlet_wrapper + cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs) + else: + # for pkg in pkgs: + # bb.note('Debug: What required: %s' % pkg) + # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg)) + + cmd = "%s %s remove -y %s" % (self.smart_cmd, + self.smart_opt, + ' '.join(pkgs)) + + try: + bb.note(cmd) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + bb.note(output) + except subprocess.CalledProcessError as e: + bb.note("Unable to remove packages. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + def upgrade(self): + bb.note('smart upgrade') + self._invoke_smart('upgrade') + + def write_index(self): + result = self.indexer.write_index() + + if result is not None: + bb.fatal(result) + + def remove_packaging_data(self): + bb.utils.remove(self.image_rpmlib, True) + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), + True) + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True) + + # remove temp directory + bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True) + + def backup_packaging_data(self): + # Save the rpmlib for increment rpm image generation + if os.path.exists(self.saved_rpmlib): + bb.utils.remove(self.saved_rpmlib, True) + shutil.copytree(self.image_rpmlib, + self.saved_rpmlib, + symlinks=True) + + def recovery_packaging_data(self): + # Move the rpmlib back + if os.path.exists(self.saved_rpmlib): + if os.path.exists(self.image_rpmlib): + bb.utils.remove(self.image_rpmlib, True) + + bb.note('Recovery packaging data') + shutil.copytree(self.saved_rpmlib, + self.image_rpmlib, + symlinks=True) + + def list_installed(self, format=None): + return self.pkgs_list.list(format) + + ''' + If incremental install, we need to determine what we've got, + what we need to add, and what to remove... + The dump_install_solution will dump and save the new install + solution. + ''' + def dump_install_solution(self, pkgs): + bb.note('creating new install solution for incremental install') + if len(pkgs) == 0: + return + + pkgs = self._pkg_translate_oe_to_smart(pkgs, False) + install_pkgs = list() + + cmd = "%s %s install -y --dump %s 2>%s" % \ + (self.smart_cmd, + self.smart_opt, + ' '.join(pkgs), + self.solution_manifest) + try: + # Disable rpmsys channel for the fake install + self._invoke_smart('channel --disable rpmsys') + + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + with open(self.solution_manifest, 'r') as manifest: + for pkg in manifest.read().split('\n'): + if '@' in pkg: + install_pkgs.append(pkg) + except subprocess.CalledProcessError as e: + bb.note("Unable to dump install packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + # Recovery rpmsys channel + self._invoke_smart('channel --enable rpmsys') + return install_pkgs + + ''' + If incremental install, we need to determine what we've got, + what we need to add, and what to remove... 
+ The load_old_install_solution will load the previous install + solution + ''' + def load_old_install_solution(self): + bb.note('load old install solution for incremental install') + installed_pkgs = list() + if not os.path.exists(self.solution_manifest): + bb.note('old install solution not exist') + return installed_pkgs + + with open(self.solution_manifest, 'r') as manifest: + for pkg in manifest.read().split('\n'): + if '@' in pkg: + installed_pkgs.append(pkg.strip()) + + return installed_pkgs + + ''' + Dump all available packages in feeds, it should be invoked after the + newest rpm index was created + ''' + def dump_all_available_pkgs(self): + available_manifest = self.d.expand('${T}/saved/available_pkgs.txt') + available_pkgs = list() + cmd = "%s %s query --output %s" % \ + (self.smart_cmd, self.smart_opt, available_manifest) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + with open(available_manifest, 'r') as manifest: + for pkg in manifest.read().split('\n'): + if '@' in pkg: + available_pkgs.append(pkg.strip()) + except subprocess.CalledProcessError as e: + bb.note("Unable to list all available packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + self.fullpkglist = available_pkgs + + return + + def save_rpmpostinst(self, pkg): + mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split() + + new_pkg = pkg + # Remove any multilib prefix from the package name + for mlib in mlibs: + if mlib in pkg: + new_pkg = pkg.replace(mlib + '-', '') + break + + bb.note(' * postponing %s' % new_pkg) + saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg + + cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs + cmd += ' --dbpath=/var/lib/rpm ' + new_pkg + cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"' + cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"' + cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir + + try: + bb.note(cmd) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() + bb.note(output) + os.chmod(saved_dir, 0755) + except subprocess.CalledProcessError as e: + bb.fatal("Invoke save_rpmpostinst failed. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + '''Write common configuration for target usage''' + def rpm_setup_smart_target_config(self): + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), + True) + + self._invoke_smart('config --set rpm-nolinktos=1') + self._invoke_smart('config --set rpm-noparentdirs=1') + for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): + self._invoke_smart('flag --set ignore-recommends %s' % i) + self._invoke_smart('channel --add rpmsys type=rpm-sys -y') + + ''' + The rpm db lock files were produced after invoking rpm to query on + build system, and they caused the rpm on target didn't work, so we + need to unlock the rpm db by removing the lock files. 
+ ''' + def unlock_rpm_db(self): + # Remove rpm db lock files + rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs) + for f in rpm_db_locks: + bb.utils.remove(f, True) + + +class OpkgPM(PackageManager): + def __init__(self, d, target_rootfs, config_file, archs, task_name='target'): + super(OpkgPM, self).__init__(d) + + self.target_rootfs = target_rootfs + self.config_file = config_file + self.pkg_archs = archs + self.task_name = task_name + + self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True) + self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock") + self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl") + self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs) + self.opkg_args += self.d.getVar("OPKG_ARGS", True) + + opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True) + if opkg_lib_dir[0] == "/": + opkg_lib_dir = opkg_lib_dir[1:] + + self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg") + + bb.utils.mkdirhier(self.opkg_dir) + + self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name) + if not os.path.exists(self.d.expand('${T}/saved')): + bb.utils.mkdirhier(self.d.expand('${T}/saved')) + + if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1": + self._create_config() + else: + self._create_custom_config() + + self.indexer = OpkgIndexer(self.d, self.deploy_dir) + + """ + This function will change a package's status in /var/lib/opkg/status file. + If 'packages' is None then the new_status will be applied to all + packages + """ + def mark_packages(self, status_tag, packages=None): + status_file = os.path.join(self.opkg_dir, "status") + + with open(status_file, "r") as sf: + with open(status_file + ".tmp", "w+") as tmp_sf: + if packages is None: + tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", + r"Package: \1\n\2Status: \3%s" % status_tag, + sf.read())) + else: + if type(packages).__name__ != "list": + raise TypeError("'packages' should be a list object") + + status = sf.read() + for pkg in packages: + status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, + r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), + status) + + tmp_sf.write(status) + + os.rename(status_file + ".tmp", status_file) + + def _create_custom_config(self): + bb.note("Building from feeds activated!") + + with open(self.config_file, "w+") as config_file: + priority = 1 + for arch in self.pkg_archs.split(): + config_file.write("arch %s %d\n" % (arch, priority)) + priority += 5 + + for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split(): + feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line) + + if feed_match is not None: + feed_name = feed_match.group(1) + feed_uri = feed_match.group(2) + + bb.note("Add %s feed with URL %s" % (feed_name, feed_uri)) + + config_file.write("src/gz %s %s\n" % (feed_name, feed_uri)) + + """ + Allow to use package deploy directory contents as quick devel-testing + feed. This creates individual feed configs for each arch subdir of those + specified as compatible for the current machine. + NOTE: Development-helper feature, NOT a full-fledged feed. 
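+
+        With e.g. FEED_DEPLOYDIR_BASE_URI = "http://192.168.7.1:8080" (an
+        illustrative value), the per-arch feed config written below gets a
+        line such as:
+
+            src/gz local-armv5e http://192.168.7.1:8080/armv5e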
+ """ + if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "": + for arch in self.pkg_archs.split(): + cfg_file_name = os.path.join(self.target_rootfs, + self.d.getVar("sysconfdir", True), + "opkg", + "local-%s-feed.conf" % arch) + + with open(cfg_file_name, "w+") as cfg_file: + cfg_file.write("src/gz local-%s %s/%s" % + arch, + self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True), + arch) + + def _create_config(self): + with open(self.config_file, "w+") as config_file: + priority = 1 + for arch in self.pkg_archs.split(): + config_file.write("arch %s %d\n" % (arch, priority)) + priority += 5 + + config_file.write("src oe file:%s\n" % self.deploy_dir) + + for arch in self.pkg_archs.split(): + pkgs_dir = os.path.join(self.deploy_dir, arch) + if os.path.isdir(pkgs_dir): + config_file.write("src oe-%s file:%s\n" % + (arch, pkgs_dir)) + + def insert_feeds_uris(self): + if self.feed_uris == "": + return + + rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf' + % self.target_rootfs) + + with open(rootfs_config, "w+") as config_file: + uri_iterator = 0 + for uri in self.feed_uris.split(): + config_file.write("src/gz url-%d %s/ipk\n" % + (uri_iterator, uri)) + + for arch in self.pkg_archs.split(): + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + bb.note('Note: adding opkg channel url-%s-%d (%s)' % + (arch, uri_iterator, uri)) + + config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" % + (arch, uri_iterator, uri, arch)) + uri_iterator += 1 + + def update(self): + self.deploy_dir_lock() + + cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + self.deploy_dir_unlock() + bb.fatal("Unable to update the package index files. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + self.deploy_dir_unlock() + + def install(self, pkgs, attempt_only=False): + if attempt_only and len(pkgs) == 0: + return + + cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True) + + try: + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + bb.note(output) + except subprocess.CalledProcessError as e: + (bb.fatal, bb.note)[attempt_only]("Unable to install packages. " + "Command '%s' returned %d:\n%s" % + (cmd, e.returncode, e.output)) + + def remove(self, pkgs, with_dependencies=True): + if with_dependencies: + cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + else: + cmd = "%s %s --force-depends remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + + try: + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to remove packages. 
Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + bb.fatal(result) + + def remove_packaging_data(self): + bb.utils.remove(self.opkg_dir, True) + # create the directory back, it's needed by PM lock + bb.utils.mkdirhier(self.opkg_dir) + + def list_installed(self, format=None): + return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format) + + def handle_bad_recommendations(self): + bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or "" + if bad_recommendations.strip() == "": + return + + status_file = os.path.join(self.opkg_dir, "status") + + # If status file existed, it means the bad recommendations has already + # been handled + if os.path.exists(status_file): + return + + cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args) + + with open(status_file, "w+") as status: + for pkg in bad_recommendations.split(): + pkg_info = cmd + pkg + + try: + output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip() + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get package info. Command '%s' " + "returned %d:\n%s" % (pkg_info, e.returncode, e.output)) + + if output == "": + bb.note("Ignored bad recommendation: '%s' is " + "not a package" % pkg) + continue + + for line in output.split('\n'): + if line.startswith("Status:"): + status.write("Status: deinstall hold not-installed\n") + else: + status.write(line + "\n") + + ''' + The following function dummy installs pkgs and returns the log of output. + ''' + def dummy_install(self, pkgs): + if len(pkgs) == 0: + return + + # Create an temp dir as opkg root for dummy installation + temp_rootfs = self.d.expand('${T}/opkg') + temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg') + bb.utils.mkdirhier(temp_opkg_dir) + + opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) + opkg_args += self.d.getVar("OPKG_ARGS", True) + + cmd = "%s %s update" % (self.opkg_cmd, opkg_args) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + # Dummy installation + cmd = "%s %s --noaction install %s " % (self.opkg_cmd, + opkg_args, + ' '.join(pkgs)) + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to dummy install packages. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + bb.utils.remove(temp_rootfs, True) + + return output + + def backup_packaging_data(self): + # Save the opkglib for increment ipk image generation + if os.path.exists(self.saved_opkg_dir): + bb.utils.remove(self.saved_opkg_dir, True) + shutil.copytree(self.opkg_dir, + self.saved_opkg_dir, + symlinks=True) + + def recover_packaging_data(self): + # Move the opkglib back + if os.path.exists(self.saved_opkg_dir): + if os.path.exists(self.opkg_dir): + bb.utils.remove(self.opkg_dir, True) + + bb.note('Recover packaging data') + shutil.copytree(self.saved_opkg_dir, + self.opkg_dir, + symlinks=True) + + +class DpkgPM(PackageManager): + def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None): + super(DpkgPM, self).__init__(d) + self.target_rootfs = target_rootfs + self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True) + if apt_conf_dir is None: + self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt") + else: + self.apt_conf_dir = apt_conf_dir + self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") + self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get") + + self.apt_args = d.getVar("APT_ARGS", True) + + self._create_configs(archs, base_archs) + + self.indexer = DpkgIndexer(self.d, self.deploy_dir) + + """ + This function will change a package's status in /var/lib/dpkg/status file. + If 'packages' is None then the new_status will be applied to all + packages + """ + def mark_packages(self, status_tag, packages=None): + status_file = self.target_rootfs + "/var/lib/dpkg/status" + + with open(status_file, "r") as sf: + with open(status_file + ".tmp", "w+") as tmp_sf: + if packages is None: + tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", + r"Package: \1\n\2Status: \3%s" % status_tag, + sf.read())) + else: + if type(packages).__name__ != "list": + raise TypeError("'packages' should be a list object") + + status = sf.read() + for pkg in packages: + status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, + r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), + status) + + tmp_sf.write(status) + + os.rename(status_file + ".tmp", status_file) + + """ + Run the pre/post installs for package "package_name". If package_name is + None, then run all pre/post install scriptlets. + """ + def run_pre_post_installs(self, package_name=None): + info_dir = self.target_rootfs + "/var/lib/dpkg/info" + suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")] + status_file = self.target_rootfs + "/var/lib/dpkg/status" + installed_pkgs = [] + + with open(status_file, "r") as status: + for line in status.read().split('\n'): + m = re.match("^Package: (.*)", line) + if m is not None: + installed_pkgs.append(m.group(1)) + + if package_name is not None and not package_name in installed_pkgs: + return + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True) + + failed_pkgs = [] + for pkg_name in installed_pkgs: + for suffix in suffixes: + p_full = os.path.join(info_dir, pkg_name + suffix[0]) + if os.path.exists(p_full): + try: + bb.note("Executing %s for package: %s ..." 
% + (suffix[1].lower(), pkg_name)) + subprocess.check_output(p_full, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.note("%s for package %s failed with %d:\n%s" % + (suffix[1], pkg_name, e.returncode, e.output)) + failed_pkgs.append(pkg_name) + break + + if len(failed_pkgs): + self.mark_packages("unpacked", failed_pkgs) + + def update(self): + os.environ['APT_CONFIG'] = self.apt_conf_file + + self.deploy_dir_lock() + + cmd = "%s update" % self.apt_get_cmd + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update the package index files. Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + self.deploy_dir_unlock() + + def install(self, pkgs, attempt_only=False): + if attempt_only and len(pkgs) == 0: + return + + os.environ['APT_CONFIG'] = self.apt_conf_file + + cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \ + (self.apt_get_cmd, self.apt_args, ' '.join(pkgs)) + + try: + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + (bb.fatal, bb.note)[attempt_only]("Unable to install packages. " + "Command '%s' returned %d:\n%s" % + (cmd, e.returncode, e.output)) + + # rename *.dpkg-new files/dirs + for root, dirs, files in os.walk(self.target_rootfs): + for dir in dirs: + new_dir = re.sub("\.dpkg-new", "", dir) + if dir != new_dir: + os.rename(os.path.join(root, dir), + os.path.join(root, new_dir)) + + for file in files: + new_file = re.sub("\.dpkg-new", "", file) + if file != new_file: + os.rename(os.path.join(root, file), + os.path.join(root, new_file)) + + + def remove(self, pkgs, with_dependencies=True): + if with_dependencies: + os.environ['APT_CONFIG'] = self.apt_conf_file + cmd = "%s remove %s" % (self.apt_get_cmd, ' '.join(pkgs)) + else: + cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \ + " -r --force-depends %s" % \ + (bb.utils.which(os.getenv('PATH'), "dpkg"), + self.target_rootfs, self.target_rootfs, ' '.join(pkgs)) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to remove packages. 
Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + bb.fatal(result) + + def insert_feeds_uris(self): + if self.feed_uris == "": + return + + sources_conf = os.path.join("%s/etc/apt/sources.list" + % self.target_rootfs) + arch_list = [] + archs = self.d.getVar('PACKAGE_ARCHS', True) + for arch in archs.split(): + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + + with open(sources_conf, "w+") as sources_file: + for uri in self.feed_uris.split(): + for arch in arch_list: + bb.note('Note: adding dpkg channel at (%s)' % uri) + sources_file.write("deb %s/deb/%s ./\n" % + (uri, arch)) + + def _create_configs(self, archs, base_archs): + base_archs = re.sub("_", "-", base_archs) + + if os.path.exists(self.apt_conf_dir): + bb.utils.remove(self.apt_conf_dir, True) + + bb.utils.mkdirhier(self.apt_conf_dir) + bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/") + bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/") + + arch_list = [] + for arch in archs.split(): + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + + with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file: + priority = 801 + for arch in arch_list: + prefs_file.write( + "Package: *\n" + "Pin: release l=%s\n" + "Pin-Priority: %d\n\n" % (arch, priority)) + + priority += 5 + + for pkg in self.d.getVar('PACKAGE_EXCLUDE', True).split(): + prefs_file.write( + "Package: %s\n" + "Pin: release *\n" + "Pin-Priority: -1\n\n" % pkg) + + arch_list.reverse() + + with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file: + for arch in arch_list: + sources_file.write("deb file:%s/ ./\n" % + os.path.join(self.deploy_dir, arch)) + + with open(self.apt_conf_file, "w+") as apt_conf: + with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample: + for line in apt_conf_sample.read().split("\n"): + line = re.sub("Architecture \".*\";", + "Architecture \"%s\";" % base_archs, line) + line = re.sub("#ROOTFS#", self.target_rootfs, line) + line = re.sub("#APTCONF#", self.apt_conf_dir, line) + + apt_conf.write(line + "\n") + + target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs + bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info")) + + bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates")) + + if not os.path.exists(os.path.join(target_dpkg_dir, "status")): + open(os.path.join(target_dpkg_dir, "status"), "w+").close() + if not os.path.exists(os.path.join(target_dpkg_dir, "available")): + open(os.path.join(target_dpkg_dir, "available"), "w+").close() + + def remove_packaging_data(self): + bb.utils.remove(os.path.join(self.target_rootfs, + self.d.getVar('opkglibdir', True)), True) + bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True) + + def fix_broken_dependencies(self): + os.environ['APT_CONFIG'] = self.apt_conf_file + + cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Cannot fix broken dependencies. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + def list_installed(self, format=None): + return DpkgPkgsList(self.d, self.target_rootfs).list() + + +def generate_index_files(d): + classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split() + + indexer_map = { + "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)), + "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)), + "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True)) + } + + result = None + + for pkg_class in classes: + if not pkg_class in indexer_map: + continue + + if os.path.exists(indexer_map[pkg_class][1]): + result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index() + + if result is not None: + bb.fatal(result) + +if __name__ == "__main__": + """ + We should be able to run this as a standalone script, from outside bitbake + environment. + """ + """ + TBD + """ diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py new file mode 100644 index 0000000000..cd5f0445f5 --- /dev/null +++ b/meta/lib/oe/packagedata.py @@ -0,0 +1,94 @@ +import codecs + +def packaged(pkg, d): + return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK) + +def read_pkgdatafile(fn): + pkgdata = {} + + def decode(str): + c = codecs.getdecoder("string_escape") + return c(str)[0] + + if os.access(fn, os.R_OK): + import re + f = open(fn, 'r') + lines = f.readlines() + f.close() + r = re.compile("([^:]+):\s*(.*)") + for l in lines: + m = r.match(l) + if m: + pkgdata[m.group(1)] = decode(m.group(2)) + + return pkgdata + +def get_subpkgedata_fn(pkg, d): + return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg) + +def has_subpkgdata(pkg, d): + return os.access(get_subpkgedata_fn(pkg, d), os.R_OK) + +def read_subpkgdata(pkg, d): + return read_pkgdatafile(get_subpkgedata_fn(pkg, d)) + +def has_pkgdata(pn, d): + fn = d.expand('${PKGDATA_DIR}/%s' % pn) + return os.access(fn, os.R_OK) + +def read_pkgdata(pn, d): + fn = d.expand('${PKGDATA_DIR}/%s' % pn) + return read_pkgdatafile(fn) + +# +# Collapse FOO_pkg variables into FOO +# +def read_subpkgdata_dict(pkg, d): + ret = {} + subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d)) + for var in subd: + newvar = var.replace("_" + pkg, "") + if newvar == var and var + "_" + pkg in subd: + continue + ret[newvar] = subd[var] + return ret + +def _pkgmap(d): + """Return a dictionary mapping package to recipe name.""" + + pkgdatadir = d.getVar("PKGDATA_DIR", True) + + pkgmap = {} + try: + files = os.listdir(pkgdatadir) + except OSError: + bb.warn("No files in %s?" % pkgdatadir) + files = [] + + for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files): + try: + pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn)) + except OSError: + continue + + packages = pkgdata.get("PACKAGES") or "" + for pkg in packages.split(): + pkgmap[pkg] = pn + + return pkgmap + +def pkgmap(d): + """Return a dictionary mapping package to recipe name. 
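+
+    For example (package and recipe names purely illustrative):
+
+        pkgmap(d).get("libusb-1.0-0")   # -> "libusb1"
+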
+ Cache the mapping in the metadata""" + + pkgmap_data = d.getVar("__pkgmap_data", False) + if pkgmap_data is None: + pkgmap_data = _pkgmap(d) + d.setVar("__pkgmap_data", pkgmap_data) + + return pkgmap_data + +def recipename(pkg, d): + """Return the recipe name for the given binary package name.""" + + return pkgmap(d).get(pkg) diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py new file mode 100644 index 0000000000..12eb4212ff --- /dev/null +++ b/meta/lib/oe/packagegroup.py @@ -0,0 +1,36 @@ +import itertools + +def is_optional(feature, d): + packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True) + if packages: + return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional")) + else: + return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional")) + +def packages(features, d): + for feature in features: + packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True) + if not packages: + packages = d.getVar("PACKAGE_GROUP_%s" % feature, True) + for pkg in (packages or "").split(): + yield pkg + +def required_packages(features, d): + req = filter(lambda feature: not is_optional(feature, d), features) + return packages(req, d) + +def optional_packages(features, d): + opt = filter(lambda feature: is_optional(feature, d), features) + return packages(opt, d) + +def active_packages(features, d): + return itertools.chain(required_packages(features, d), + optional_packages(features, d)) + +def active_recipes(features, d): + import oe.packagedata + + for pkg in active_packages(features, d): + recipe = oe.packagedata.recipename(pkg, d) + if recipe: + yield recipe diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py new file mode 100644 index 0000000000..b085c9d6b5 --- /dev/null +++ b/meta/lib/oe/patch.py @@ -0,0 +1,447 @@ +import oe.path + +class NotFoundError(bb.BBHandledException): + def __init__(self, path): + self.path = path + + def __str__(self): + return "Error: %s not found." % self.path + +class CmdError(bb.BBHandledException): + def __init__(self, exitstatus, output): + self.status = exitstatus + self.output = output + + def __str__(self): + return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output) + + +def runcmd(args, dir = None): + import pipes + + if dir: + olddir = os.path.abspath(os.curdir) + if not os.path.exists(dir): + raise NotFoundError(dir) + os.chdir(dir) + # print("cwd: %s -> %s" % (olddir, dir)) + + try: + args = [ pipes.quote(str(arg)) for arg in args ] + cmd = " ".join(args) + # print("cmd: %s" % cmd) + (exitstatus, output) = oe.utils.getstatusoutput(cmd) + if exitstatus != 0: + raise CmdError(exitstatus >> 8, output) + return output + + finally: + if dir: + os.chdir(olddir) + +class PatchError(Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return "Patch Error: %s" % self.msg + +class PatchSet(object): + defaults = { + "strippath": 1 + } + + def __init__(self, dir, d): + self.dir = dir + self.d = d + self.patches = [] + self._current = None + + def current(self): + return self._current + + def Clean(self): + """ + Clean out the patch set. Generally includes unapplying all + patches and wiping out all associated metadata. 
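+
+        A rough sketch of a PatchSet subclass lifecycle (directory and
+        patch names are illustrative):
+
+            patchset = PatchTree(workdir, d)
+            patchset.Clean()
+            patchset.Import({"file": "0001-fix.patch", "strippath": 1}, True)
+            patchset.Push()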
+ """ + raise NotImplementedError() + + def Import(self, patch, force): + if not patch.get("file"): + if not patch.get("remote"): + raise PatchError("Patch file must be specified in patch import.") + else: + patch["file"] = bb.fetch2.localpath(patch["remote"], self.d) + + for param in PatchSet.defaults: + if not patch.get(param): + patch[param] = PatchSet.defaults[param] + + if patch.get("remote"): + patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d) + + patch["filemd5"] = bb.utils.md5_file(patch["file"]) + + def Push(self, force): + raise NotImplementedError() + + def Pop(self, force): + raise NotImplementedError() + + def Refresh(self, remote = None, all = None): + raise NotImplementedError() + + +class PatchTree(PatchSet): + def __init__(self, dir, d): + PatchSet.__init__(self, dir, d) + self.patchdir = os.path.join(self.dir, 'patches') + self.seriespath = os.path.join(self.dir, 'patches', 'series') + bb.utils.mkdirhier(self.patchdir) + + def _appendPatchFile(self, patch, strippath): + with open(self.seriespath, 'a') as f: + f.write(os.path.basename(patch) + "," + strippath + "\n") + shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)] + runcmd(["sh", "-c", " ".join(shellcmd)], self.dir) + + def _removePatch(self, p): + patch = {} + patch['file'] = p.split(",")[0] + patch['strippath'] = p.split(",")[1] + self._applypatch(patch, False, True) + + def _removePatchFile(self, all = False): + if not os.path.exists(self.seriespath): + return + patches = open(self.seriespath, 'r+').readlines() + if all: + for p in reversed(patches): + self._removePatch(os.path.join(self.patchdir, p.strip())) + patches = [] + else: + self._removePatch(os.path.join(self.patchdir, patches[-1].strip())) + patches.pop() + with open(self.seriespath, 'w') as f: + for p in patches: + f.write(p) + + def Import(self, patch, force = None): + """""" + PatchSet.Import(self, patch, force) + + if self._current is not None: + i = self._current + 1 + else: + i = 0 + self.patches.insert(i, patch) + + def _applypatch(self, patch, force = False, reverse = False, run = True): + shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']] + if reverse: + shellcmd.append('-R') + + if not run: + return "sh" + "-c" + " ".join(shellcmd) + + if not force: + shellcmd.append('--dry-run') + + output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir) + + if force: + return + + shellcmd.pop(len(shellcmd) - 1) + output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir) + + if not reverse: + self._appendPatchFile(patch['file'], patch['strippath']) + + return output + + def Push(self, force = False, all = False, run = True): + bb.note("self._current is %s" % self._current) + bb.note("patches is %s" % self.patches) + if all: + for i in self.patches: + bb.note("applying patch %s" % i) + self._applypatch(i, force) + self._current = i + else: + if self._current is not None: + next = self._current + 1 + else: + next = 0 + + bb.note("applying patch %s" % self.patches[next]) + ret = self._applypatch(self.patches[next], force) + + self._current = next + return ret + + def Pop(self, force = None, all = None): + if all: + self._removePatchFile(True) + self._current = None + else: + self._removePatchFile(False) + + if self._current == 0: + self._current = None + + if self._current is not None: + self._current = self._current - 1 + + def Clean(self): + """""" + self.Pop(all=True) + +class GitApplyTree(PatchTree): + def __init__(self, dir, d): + PatchTree.__init__(self, dir, d) + + 
def _applypatch(self, patch, force = False, reverse = False, run = True): + def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True): + if reverse: + shellcmd.append('-R') + + shellcmd.append(patch['file']) + + if not run: + return "sh" + "-c" + " ".join(shellcmd) + + return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir) + + try: + shellcmd = ["git", "--work-tree=.", "am", "-3", "-p%s" % patch['strippath']] + return _applypatchhelper(shellcmd, patch, force, reverse, run) + except CmdError: + shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']] + return _applypatchhelper(shellcmd, patch, force, reverse, run) + + +class QuiltTree(PatchSet): + def _runcmd(self, args, run = True): + quiltrc = self.d.getVar('QUILTRCFILE', True) + if not run: + return ["quilt"] + ["--quiltrc"] + [quiltrc] + args + runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir) + + def _quiltpatchpath(self, file): + return os.path.join(self.dir, "patches", os.path.basename(file)) + + + def __init__(self, dir, d): + PatchSet.__init__(self, dir, d) + self.initialized = False + p = os.path.join(self.dir, 'patches') + if not os.path.exists(p): + os.makedirs(p) + + def Clean(self): + try: + self._runcmd(["pop", "-a", "-f"]) + oe.path.remove(os.path.join(self.dir, "patches","series")) + except Exception: + pass + self.initialized = True + + def InitFromDir(self): + # read series -> self.patches + seriespath = os.path.join(self.dir, 'patches', 'series') + if not os.path.exists(self.dir): + raise NotFoundError(self.dir) + if os.path.exists(seriespath): + series = file(seriespath, 'r') + for line in series.readlines(): + patch = {} + parts = line.strip().split() + patch["quiltfile"] = self._quiltpatchpath(parts[0]) + patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"]) + if len(parts) > 1: + patch["strippath"] = parts[1][2:] + self.patches.append(patch) + series.close() + + # determine which patches are applied -> self._current + try: + output = runcmd(["quilt", "applied"], self.dir) + except CmdError: + import sys + if sys.exc_value.output.strip() == "No patches applied": + return + else: + raise + output = [val for val in output.split('\n') if not val.startswith('#')] + for patch in self.patches: + if os.path.basename(patch["quiltfile"]) == output[-1]: + self._current = self.patches.index(patch) + self.initialized = True + + def Import(self, patch, force = None): + if not self.initialized: + self.InitFromDir() + PatchSet.Import(self, patch, force) + oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True) + f = open(os.path.join(self.dir, "patches","series"), "a"); + f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"]+"\n") + f.close() + patch["quiltfile"] = self._quiltpatchpath(patch["file"]) + patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"]) + + # TODO: determine if the file being imported: + # 1) is already imported, and is the same + # 2) is already imported, but differs + + self.patches.insert(self._current or 0, patch) + + + def Push(self, force = False, all = False, run = True): + # quilt push [-f] + + args = ["push"] + if force: + args.append("-f") + if all: + args.append("-a") + if not run: + return self._runcmd(args, run) + + self._runcmd(args) + + if self._current is not None: + self._current = self._current + 1 + else: + self._current = 0 + + def Pop(self, force = None, all = None): + # quilt pop [-f] + args = ["pop"] + if force: + args.append("-f") + if all: + args.append("-a") + + 
self._runcmd(args) + + if self._current == 0: + self._current = None + + if self._current is not None: + self._current = self._current - 1 + + def Refresh(self, **kwargs): + if kwargs.get("remote"): + patch = self.patches[kwargs["patch"]] + if not patch: + raise PatchError("No patch found at index %s in patchset." % kwargs["patch"]) + (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"]) + if type == "file": + import shutil + if not patch.get("file") and patch.get("remote"): + patch["file"] = bb.fetch2.localpath(patch["remote"], self.d) + + shutil.copyfile(patch["quiltfile"], patch["file"]) + else: + raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type)) + else: + # quilt refresh + args = ["refresh"] + if kwargs.get("quiltfile"): + args.append(os.path.basename(kwargs["quiltfile"])) + elif kwargs.get("patch"): + args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"])) + self._runcmd(args) + +class Resolver(object): + def __init__(self, patchset, terminal): + raise NotImplementedError() + + def Resolve(self): + raise NotImplementedError() + + def Revert(self): + raise NotImplementedError() + + def Finalize(self): + raise NotImplementedError() + +class NOOPResolver(Resolver): + def __init__(self, patchset, terminal): + self.patchset = patchset + self.terminal = terminal + + def Resolve(self): + olddir = os.path.abspath(os.curdir) + os.chdir(self.patchset.dir) + try: + self.patchset.Push() + except Exception: + import sys + os.chdir(olddir) + raise + +# Patch resolver which relies on the user doing all the work involved in the +# resolution, with the exception of refreshing the remote copy of the patch +# files (the urls). +class UserResolver(Resolver): + def __init__(self, patchset, terminal): + self.patchset = patchset + self.terminal = terminal + + # Force a push in the patchset, then drop to a shell for the user to + # resolve any rejected hunks + def Resolve(self): + olddir = os.path.abspath(os.curdir) + os.chdir(self.patchset.dir) + try: + self.patchset.Push(False) + except CmdError as v: + # Patch application failed + patchcmd = self.patchset.Push(True, False, False) + + t = self.patchset.d.getVar('T', True) + if not t: + bb.msg.fatal("Build", "T not set") + bb.utils.mkdirhier(t) + import random + rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random()) + f = open(rcfile, "w") + f.write("echo '*** Manual patch resolution mode ***'\n") + f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n") + f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n") + f.write("echo ''\n") + f.write(" ".join(patchcmd) + "\n") + f.close() + os.chmod(rcfile, 0775) + + self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d) + + # Construct a new PatchSet after the user's changes, compare the + # sets, checking patches for modifications, and doing a remote + # refresh on each. 
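+            # e.g. if the user ran "quilt refresh" on 0001-fix.patch inside
+            # the rescue shell, its md5 no longer matches the old set, so the
+            # copy behind its remote url gets refreshed (name illustrative).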
+ oldpatchset = self.patchset + self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d) + + for patch in self.patchset.patches: + oldpatch = None + for opatch in oldpatchset.patches: + if opatch["quiltfile"] == patch["quiltfile"]: + oldpatch = opatch + + if oldpatch: + patch["remote"] = oldpatch["remote"] + if patch["quiltfile"] == oldpatch["quiltfile"]: + if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]: + bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"])) + # user change? remote refresh + self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch)) + else: + # User did not fix the problem. Abort. + raise PatchError("Patch application failed, and user did not fix and refresh the patch.") + except Exception: + os.chdir(olddir) + raise + os.chdir(olddir) diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py new file mode 100644 index 0000000000..413ebfb395 --- /dev/null +++ b/meta/lib/oe/path.py @@ -0,0 +1,243 @@ +import errno +import glob +import shutil +import subprocess +import os.path + +def join(*paths): + """Like os.path.join but doesn't treat absolute RHS specially""" + return os.path.normpath("/".join(paths)) + +def relative(src, dest): + """ Return a relative path from src to dest. + + >>> relative("/usr/bin", "/tmp/foo/bar") + ../../tmp/foo/bar + + >>> relative("/usr/bin", "/usr/lib") + ../lib + + >>> relative("/tmp", "/tmp/foo/bar") + foo/bar + """ + + return os.path.relpath(dest, src) + +def make_relative_symlink(path): + """ Convert an absolute symlink to a relative one """ + if not os.path.islink(path): + return + link = os.readlink(path) + if not os.path.isabs(link): + return + + # find the common ancestor directory + ancestor = path + depth = 0 + while ancestor and not link.startswith(ancestor): + ancestor = ancestor.rpartition('/')[0] + depth += 1 + + if not ancestor: + print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path) + return + + base = link.partition(ancestor)[2].strip('/') + while depth > 1: + base = "../" + base + depth -= 1 + + os.remove(path) + os.symlink(base, path) + +def format_display(path, metadata): + """ Prepare a path for display to the user. """ + rel = relative(metadata.getVar("TOPDIR", True), path) + if len(rel) > len(path): + return path + else: + return rel + +def copytree(src, dst): + # We could use something like shutil.copytree here but it turns out to + # to be slow. It takes twice as long copying to an empty directory. + # If dst already has contents performance can be 15 time slower + # This way we also preserve hardlinks between files in the tree. + + bb.utils.mkdirhier(dst) + cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst) + check_output(cmd, shell=True, stderr=subprocess.STDOUT) + +def copyhardlinktree(src, dst): + """ Make the hard link when possible, otherwise copy. """ + bb.utils.mkdirhier(dst) + if os.path.isdir(src) and not len(os.listdir(src)): + return + + if (os.stat(src).st_dev == os.stat(dst).st_dev): + # Need to copy directories only with tar first since cp will error if two + # writers try and create a directory at the same time + cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst) + check_output(cmd, shell=True, stderr=subprocess.STDOUT) + cmd = 'cd %s; find . 
-print0 | cpio --null -pdlu %s' % (src, dst) + check_output(cmd, shell=True, stderr=subprocess.STDOUT) + else: + copytree(src, dst) + +def remove(path, recurse=True): + """Equivalent to rm -f or rm -rf""" + for name in glob.glob(path): + try: + os.unlink(name) + except OSError as exc: + if recurse and exc.errno == errno.EISDIR: + shutil.rmtree(name) + elif exc.errno != errno.ENOENT: + raise + +def symlink(source, destination, force=False): + """Create a symbolic link""" + try: + if force: + remove(destination) + os.symlink(source, destination) + except OSError as e: + if e.errno != errno.EEXIST or os.readlink(destination) != source: + raise + +class CalledProcessError(Exception): + def __init__(self, retcode, cmd, output = None): + self.retcode = retcode + self.cmd = cmd + self.output = output + def __str__(self): + return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.retcode, self.output) + +# Not needed when we move to python 2.7 +def check_output(*popenargs, **kwargs): + r"""Run command with arguments and return its output as a byte string. + + If the exit code was non-zero it raises a CalledProcessError. The + CalledProcessError object will have the return code in the returncode + attribute and output in the output attribute. + + The arguments are the same as for the Popen constructor. Example: + + >>> check_output(["ls", "-l", "/dev/null"]) + 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' + + The stdout argument is not allowed as it is used internally. + To capture standard error in the result, use stderr=STDOUT. + + >>> check_output(["/bin/sh", "-c", + ... "ls -l non_existent_file ; exit 0"], + ... stderr=STDOUT) + 'ls: non_existent_file: No such file or directory\n' + """ + if 'stdout' in kwargs: + raise ValueError('stdout argument not allowed, it will be overridden.') + process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) + output, unused_err = process.communicate() + retcode = process.poll() + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise CalledProcessError(retcode, cmd, output=output) + return output + +def find(dir, **walkoptions): + """ Given a directory, recurses into that directory, + returning all files as absolute paths. """ + + for root, dirs, files in os.walk(dir, **walkoptions): + for file in files: + yield os.path.join(root, file) + + +## realpath() related functions +def __is_path_below(file, root): + return (file + os.path.sep).startswith(root) + +def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir): + """Calculates real path of symlink 'start' + 'rel_path' below + 'root'; no part of 'start' below 'root' must contain symlinks. """ + have_dir = True + + for d in rel_path.split(os.path.sep): + if not have_dir and not assume_dir: + raise OSError(errno.ENOENT, "no such directory %s" % start) + + if d == os.path.pardir: # '..' + if len(start) >= len(root): + # do not follow '..' before root + start = os.path.dirname(start) + else: + # emit warning? 
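+                # ('..' components that would escape 'root' are clamped to
+                # the root directory, mirroring how the kernel resolves
+                # paths inside a chroot)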
+                pass
+        else:
+            (start, have_dir) = __realpath(os.path.join(start, d),
+                                           root, loop_cnt, assume_dir)
+
+    assert(__is_path_below(start, root))
+
+    return start
+
+def __realpath(file, root, loop_cnt, assume_dir):
+    while os.path.islink(file) and len(file) >= len(root):
+        if loop_cnt == 0:
+            raise OSError(errno.ELOOP, file)
+
+        loop_cnt -= 1
+        target = os.path.normpath(os.readlink(file))
+
+        if not os.path.isabs(target):
+            tdir = os.path.dirname(file)
+            assert(__is_path_below(tdir, root))
+        else:
+            tdir = root
+
+        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+    try:
+        is_dir = os.path.isdir(file)
+    except:
+        is_dir = False
+
+    return (file, is_dir)
+
+def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+    """ Returns the canonical path of 'file', assuming a
+    toplevel 'root' directory. When 'use_physdir' is set, all
+    preceding path components of 'file' will be resolved first;
+    this flag should be set unless it is guaranteed that there is
+    no symlink in the path. When 'assume_dir' is not set, missing
+    path components will raise an ENOENT error"""
+
+    root = os.path.normpath(root)
+    file = os.path.normpath(file)
+
+    if not root.endswith(os.path.sep):
+        # letting root end with '/' makes some things easier
+        root = root + os.path.sep
+
+    if not __is_path_below(file, root):
+        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+    try:
+        if use_physdir:
+            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+        else:
+            file = __realpath(file, root, loop_cnt, assume_dir)[0]
+    except OSError as e:
+        if e.errno == errno.ELOOP:
+            # make ELOOP more readable; without catching it, a backtrace
+            # with 100s of OSError exceptions would be printed
+            raise OSError(errno.ELOOP,
+                          "too many recursions while resolving '%s'; loop in '%s'" %
+                          (file, e.strerror))
+
+        raise
+
+    return file
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
new file mode 100644
index 0000000000..b0cbcb1fbc
--- /dev/null
+++ b/meta/lib/oe/prservice.py
@@ -0,0 +1,126 @@
+
+def prserv_make_conn(d, check = False):
+    import prserv.serv
+    host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
+    try:
+        conn = None
+        conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+        if check:
+            if not conn.ping():
+                raise Exception('service not available')
+        d.setVar("__PRSERV_CONN",conn)
+    except Exception, exc:
+        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
+
+    return conn
+
+def prserv_dump_db(d):
+    if not d.getVar('PRSERV_HOST', True):
+        bb.error("Not using network based PR service")
+        return None
+
+    conn = d.getVar("__PRSERV_CONN", True)
+    if conn is None:
+        conn = prserv_make_conn(d)
+        if conn is None:
+            bb.error("Failed to connect to remote PR service")
+            return None
+
+    #dump db
+    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
+    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
+    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
+    opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
+    return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+
+def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
+    if not d.getVar('PRSERV_HOST', True):
+        bb.error("Not using network based PR service")
+        return None
+
+    conn = d.getVar("__PRSERV_CONN", True)
+    if conn is None:
+        conn = prserv_make_conn(d)
+        if conn is None:
+            bb.error("Failed to connect to
remote PR service") + return None + #get the entry values + imported = [] + prefix = "PRAUTO$" + for v in d.keys(): + if v.startswith(prefix): + (remain, sep, checksum) = v.rpartition('$') + (remain, sep, pkgarch) = remain.rpartition('$') + (remain, sep, version) = remain.rpartition('$') + if (remain + '$' != prefix) or \ + (filter_version and filter_version != version) or \ + (filter_pkgarch and filter_pkgarch != pkgarch) or \ + (filter_checksum and filter_checksum != checksum): + continue + try: + value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True)) + except BaseException as exc: + bb.debug("Not valid value of %s:%s" % (v,str(exc))) + continue + ret = conn.importone(version,pkgarch,checksum,value) + if ret != value: + bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret)) + else: + imported.append((version,pkgarch,checksum,value)) + return imported + +def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False): + import bb.utils + #initilize the output file + bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True)) + df = d.getVar('PRSERV_DUMPFILE', True) + #write data + lf = bb.utils.lockfile("%s.lock" % df) + f = open(df, "a") + if metainfo: + #dump column info + f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']); + f.write("#Table: %s\n" % metainfo['tbl_name']) + f.write("#Columns:\n") + f.write("#name \t type \t notn \t dflt \t pk\n") + f.write("#----------\t --------\t --------\t --------\t ----\n") + for i in range(len(metainfo['col_info'])): + f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" % + (metainfo['col_info'][i]['name'], + metainfo['col_info'][i]['type'], + metainfo['col_info'][i]['notnull'], + metainfo['col_info'][i]['dflt_value'], + metainfo['col_info'][i]['pk'])) + f.write("\n") + + if lockdown: + f.write("PRSERV_LOCKDOWN = \"1\"\n\n") + + if datainfo: + idx = {} + for i in range(len(datainfo)): + pkgarch = datainfo[i]['pkgarch'] + value = datainfo[i]['value'] + if pkgarch not in idx: + idx[pkgarch] = i + elif value > datainfo[idx[pkgarch]]['value']: + idx[pkgarch] = i + f.write("PRAUTO$%s$%s$%s = \"%s\"\n" % + (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value))) + if not nomax: + for i in idx: + f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value']))) + f.close() + bb.utils.unlockfile(lf) + +def prserv_check_avail(d): + host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':')) + try: + if len(host_params) != 2: + raise TypeError + else: + int(host_params[1]) + except TypeError: + bb.fatal('Undefined/incorrect PRSERV_HOST value. 
Format: "host:port"') + else: + prserv_make_conn(d, True) diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py new file mode 100644 index 0000000000..d5cdaa0fcd --- /dev/null +++ b/meta/lib/oe/qa.py @@ -0,0 +1,111 @@ +class ELFFile: + EI_NIDENT = 16 + + EI_CLASS = 4 + EI_DATA = 5 + EI_VERSION = 6 + EI_OSABI = 7 + EI_ABIVERSION = 8 + + # possible values for EI_CLASS + ELFCLASSNONE = 0 + ELFCLASS32 = 1 + ELFCLASS64 = 2 + + # possible value for EI_VERSION + EV_CURRENT = 1 + + # possible values for EI_DATA + ELFDATANONE = 0 + ELFDATA2LSB = 1 + ELFDATA2MSB = 2 + + def my_assert(self, expectation, result): + if not expectation == result: + #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name) + raise Exception("This does not work as expected") + + def __init__(self, name, bits = 0): + self.name = name + self.bits = bits + self.objdump_output = {} + + def open(self): + self.file = file(self.name, "r") + self.data = self.file.read(ELFFile.EI_NIDENT+4) + + self.my_assert(len(self.data), ELFFile.EI_NIDENT+4) + self.my_assert(self.data[0], chr(0x7f) ) + self.my_assert(self.data[1], 'E') + self.my_assert(self.data[2], 'L') + self.my_assert(self.data[3], 'F') + if self.bits == 0: + if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32): + self.bits = 32 + elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64): + self.bits = 64 + else: + # Not 32-bit or 64.. lets assert + raise Exception("ELF but not 32 or 64 bit.") + elif self.bits == 32: + self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32)) + elif self.bits == 64: + self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64)) + else: + raise Exception("Must specify unknown, 32 or 64 bit size.") + self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) ) + + self.sex = self.data[ELFFile.EI_DATA] + if self.sex == chr(ELFFile.ELFDATANONE): + raise Exception("self.sex == ELFDATANONE") + elif self.sex == chr(ELFFile.ELFDATA2LSB): + self.sex = "<" + elif self.sex == chr(ELFFile.ELFDATA2MSB): + self.sex = ">" + else: + raise Exception("Unknown self.sex") + + def osAbi(self): + return ord(self.data[ELFFile.EI_OSABI]) + + def abiVersion(self): + return ord(self.data[ELFFile.EI_ABIVERSION]) + + def abiSize(self): + return self.bits + + def isLittleEndian(self): + return self.sex == "<" + + def isBigEngian(self): + return self.sex == ">" + + def machine(self): + """ + We know the sex stored in self.sex and we + know the position + """ + import struct + (a,) = struct.unpack(self.sex+"H", self.data[18:20]) + return a + + def run_objdump(self, cmd, d): + import bb.process + import sys + + if cmd in self.objdump_output: + return self.objdump_output[cmd] + + objdump = d.getVar('OBJDUMP', True) + + env = os.environ.copy() + env["LC_ALL"] = "C" + env["PATH"] = d.getVar('PATH', True) + + try: + bb.note("%s %s %s" % (objdump, cmd, self.name)) + self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0] + return self.objdump_output[cmd] + except Exception as e: + bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e)) + return "" diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py new file mode 100644 index 0000000000..dddbef4d64 --- /dev/null +++ b/meta/lib/oe/rootfs.py @@ -0,0 +1,757 @@ +from abc import ABCMeta, abstractmethod +from oe.utils import execute_pre_post_process +from oe.utils import contains as base_contains +from oe.package_manager import * +from oe.manifest import * +import oe.path +import filecmp +import shutil +import os +import subprocess +import re 
+ + +class Rootfs(object): + """ + This is an abstract class. Do not instantiate this directly. + """ + __metaclass__ = ABCMeta + + def __init__(self, d): + self.d = d + self.pm = None + self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True) + self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True) + + self.install_order = Manifest.INSTALL_ORDER + + @abstractmethod + def _create(self): + pass + + @abstractmethod + def _get_delayed_postinsts(self): + pass + + @abstractmethod + def _save_postinsts(self): + pass + + @abstractmethod + def _log_check(self): + pass + + def _insert_feed_uris(self): + if base_contains("IMAGE_FEATURES", "package-management", + True, False, self.d): + self.pm.insert_feeds_uris() + + @abstractmethod + def _handle_intercept_failure(self, failed_script): + pass + + """ + The _cleanup() method should be used to clean-up stuff that we don't really + want to end up on target. For example, in the case of RPM, the DB locks. + The method is called, once, at the end of create() method. + """ + @abstractmethod + def _cleanup(self): + pass + + def _exec_shell_cmd(self, cmd): + fakerootcmd = self.d.getVar('FAKEROOT', True) + if fakerootcmd is not None: + exec_cmd = [fakerootcmd, cmd] + else: + exec_cmd = cmd + + try: + subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + return None + + def create(self): + bb.note("###### Generate rootfs #######") + pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True) + post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True) + + intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + + bb.utils.remove(intercepts_dir, True) + + bb.utils.mkdirhier(self.image_rootfs) + + bb.utils.mkdirhier(self.deploy_dir_image) + + shutil.copytree(self.d.expand("${COREBASE}/scripts/postinst-intercepts"), + intercepts_dir) + + shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"), + self.deploy_dir_image + + "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt") + + execute_pre_post_process(self.d, pre_process_cmds) + + # call the package manager dependent create method + self._create() + + sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True) + bb.utils.mkdirhier(sysconfdir) + with open(sysconfdir + "/version", "w+") as ver: + ver.write(self.d.getVar('BUILDNAME', True) + "\n") + + self._run_intercepts() + + execute_pre_post_process(self.d, post_process_cmds) + + if base_contains("IMAGE_FEATURES", "read-only-rootfs", + True, False, self.d): + delayed_postinsts = self._get_delayed_postinsts() + if delayed_postinsts is not None: + bb.fatal("The following packages could not be configured" + "offline and rootfs is read-only: %s" % + delayed_postinsts) + + if self.d.getVar('USE_DEVFS', True) != "1": + self._create_devfs() + + self._uninstall_uneeded() + + self._insert_feed_uris() + + self._run_ldconfig() + + self._generate_kernel_module_deps() + + self._cleanup() + + def _uninstall_uneeded(self): + if base_contains("IMAGE_FEATURES", "package-management", + True, False, self.d): + return + + delayed_postinsts = self._get_delayed_postinsts() + if delayed_postinsts is None: + installed_pkgs_dir = self.d.expand('${WORKDIR}/installed_pkgs.txt') + pkgs_to_remove = list() + with open(installed_pkgs_dir, "r+") as installed_pkgs: + pkgs_installed = installed_pkgs.read().split('\n') + for pkg_installed in pkgs_installed[:]: + pkg = 
pkg_installed.split()[0] + if pkg in ["update-rc.d", + "base-passwd", + self.d.getVar("ROOTFS_BOOTSTRAP_INSTALL", True) + ]: + pkgs_to_remove.append(pkg) + pkgs_installed.remove(pkg_installed) + + if len(pkgs_to_remove) > 0: + self.pm.remove(pkgs_to_remove, False) + # Update installed_pkgs.txt + open(installed_pkgs_dir, "w+").write('\n'.join(pkgs_installed)) + + if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")): + self._exec_shell_cmd(["update-rc.d", "-f", "-r", + self.d.getVar('IMAGE_ROOTFS', True), + "run-postinsts", "remove"]) + else: + self._save_postinsts() + + self.pm.remove_packaging_data() + + def _run_intercepts(self): + intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + + bb.note("Running intercept scripts:") + os.environ['D'] = self.image_rootfs + for script in os.listdir(intercepts_dir): + script_full = os.path.join(intercepts_dir, script) + + if script == "postinst_intercept" or not os.access(script_full, os.X_OK): + continue + + bb.note("> Executing %s intercept ..." % script) + + try: + subprocess.check_output(script_full) + except subprocess.CalledProcessError as e: + bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" % + (script, e.returncode)) + + with open(script_full) as intercept: + registered_pkgs = None + for line in intercept.read().split("\n"): + m = re.match("^##PKGS:(.*)", line) + if m is not None: + registered_pkgs = m.group(1).strip() + break + + if registered_pkgs is not None: + bb.warn("The postinstalls for the following packages " + "will be postponed for first boot: %s" % + registered_pkgs) + + # call the backend dependent handler + self._handle_intercept_failure(registered_pkgs) + + def _run_ldconfig(self): + if self.d.getVar('LDCONFIGDEPEND', True): + bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v") + self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c', + 'new', '-v']) + + def _generate_kernel_module_deps(self): + kernel_abi_ver_file = os.path.join(self.d.getVar('STAGING_KERNEL_DIR', True), + 'kernel-abiversion') + if os.path.exists(kernel_abi_ver_file): + kernel_ver = open(kernel_abi_ver_file).read().strip(' \n') + modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules', kernel_ver) + + bb.utils.mkdirhier(modules_dir) + + self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, + kernel_ver]) + + """ + Create devfs: + * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file + * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, seached + for in the BBPATH + If neither are specified then the default name of files/device_table-minimal.txt + is searched for in the BBPATH (same as the old version.) 
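+
+    Device table entries use the usual makedevs format, e.g. an
+    illustrative line:
+
+        /dev/console c 662 0 0 5 1 - - -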
+ """ + def _create_devfs(self): + devtable_list = [] + devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True) + if devtable is not None: + devtable_list.append(devtable) + else: + devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True) + if devtables is None: + devtables = 'files/device_table-minimal.txt' + for devtable in devtables.split(): + devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable)) + + for devtable in devtable_list: + self._exec_shell_cmd(["makedevs", "-r", + self.image_rootfs, "-D", devtable]) + + +class RpmRootfs(Rootfs): + def __init__(self, d, manifest_dir): + super(RpmRootfs, self).__init__(d) + + self.manifest = RpmManifest(d, manifest_dir) + + self.pm = RpmPM(d, + d.getVar('IMAGE_ROOTFS', True), + self.d.getVar('TARGET_VENDOR', True) + ) + + self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True) + if self.inc_rpm_image_gen != "1": + bb.utils.remove(self.image_rootfs, True) + else: + self.pm.recovery_packaging_data() + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True) + + self.pm.create_configs() + + ''' + While rpm incremental image generation is enabled, it will remove the + unneeded pkgs by comparing the new install solution manifest and the + old installed manifest. + ''' + def _create_incremental(self, pkgs_initial_install): + if self.inc_rpm_image_gen == "1": + + pkgs_to_install = list() + for pkg_type in pkgs_initial_install: + pkgs_to_install += pkgs_initial_install[pkg_type] + + installed_manifest = self.pm.load_old_install_solution() + solution_manifest = self.pm.dump_install_solution(pkgs_to_install) + + pkg_to_remove = list() + for pkg in installed_manifest: + if pkg not in solution_manifest: + pkg_to_remove.append(pkg) + + self.pm.update() + + bb.note('incremental update -- upgrade packages in place ') + self.pm.upgrade() + if pkg_to_remove != []: + bb.note('incremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + + # update PM index files + self.pm.write_index() + + self.pm.dump_all_available_pkgs() + + if self.inc_rpm_image_gen == "1": + self._create_incremental(pkgs_to_install) + + self.pm.update() + + pkgs = [] + pkgs_attempt = [] + for pkg_type in pkgs_to_install: + if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: + pkgs_attempt += pkgs_to_install[pkg_type] + else: + pkgs += pkgs_to_install[pkg_type] + + self.pm.install(pkgs) + + self.pm.install(pkgs_attempt, True) + + self.pm.install_complementary() + + self._log_check() + + if self.inc_rpm_image_gen == "1": + self.pm.backup_packaging_data() + + self.pm.rpm_setup_smart_target_config() + + def _get_delayed_postinsts(self): + postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts") + if os.path.isdir(postinst_dir): + files = os.listdir(postinst_dir) + for f in files: + bb.note('Delayed package scriptlet: %s' % f) + return files + + return None + + def _save_postinsts(self): + # this is just a stub. 
For RPM, the failed postinstalls are + # already saved in /etc/rpm-postinsts + pass + + def _log_check(self): + r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)') + log_path = self.d.expand("${T}/log.do_rootfs") + with open(log_path, 'r') as log: + found_error = 0 + message = "\n" + for line in log.read().split('\n'): + if 'log_check' in line: + continue + + m = r.search(line) + if m: + found_error = 1 + bb.warn('log_check: There were error messages in the logfile') + bb.warn('log_check: Matched keyword: [%s]\n\n' % m.group()) + + if found_error >= 1 and found_error <= 5: + message += line + '\n' + found_error += 1 + + if found_error == 6: + bb.fatal(message) + + def _handle_intercept_failure(self, registered_pkgs): + rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + bb.utils.mkdirhier(rpm_postinsts_dir) + + # Save the package postinstalls in /etc/rpm-postinsts + for pkg in registered_pkgs.split(): + self.pm.save_rpmpostinst(pkg) + + def _cleanup(self): + # during the execution of postprocess commands, rpm is called several + # times to get the files installed, dependencies, etc. This creates the + # __db.00* (Berkeley DB files that hold locks, rpm specific environment + # settings, etc.), that should not get into the final rootfs + self.pm.unlock_rpm_db() + + +class DpkgRootfs(Rootfs): + def __init__(self, d, manifest_dir): + super(DpkgRootfs, self).__init__(d) + + bb.utils.remove(self.image_rootfs, True) + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True) + self.manifest = DpkgManifest(d, manifest_dir) + self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True), + d.getVar('PACKAGE_ARCHS', True), + d.getVar('DPKG_ARCH', True)) + + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + + alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives") + bb.utils.mkdirhier(alt_dir) + + # update PM index files + self.pm.write_index() + + self.pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + self.pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + self.pm.install_complementary() + + self.pm.fix_broken_dependencies() + + self.pm.mark_packages("installed") + + self.pm.run_pre_post_installs() + + def _get_delayed_postinsts(self): + pkg_list = [] + with open(self.image_rootfs + "/var/lib/dpkg/status") as status: + for line in status: + m_pkg = re.match("^Package: (.*)", line) + m_status = re.match("^Status:.*unpacked", line) + if m_pkg is not None: + pkg_name = m_pkg.group(1) + elif m_status is not None: + pkg_list.append(pkg_name) + + if len(pkg_list) == 0: + return None + + return pkg_list + + def _save_postinsts(self): + num = 0 + for p in self._get_delayed_postinsts(): + dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts") + src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info") + + bb.utils.mkdirhier(dst_postinst_dir) + + if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): + shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), + os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) + + num += 1 + + def _handle_intercept_failure(self, registered_pkgs): + self.pm.mark_packages("unpacked", registered_pkgs.split()) + + def _log_check(self): + pass + + def _cleanup(self): + pass + + +class OpkgRootfs(Rootfs): + def __init__(self, d, manifest_dir): + super(OpkgRootfs, self).__init__(d) + + self.manifest = OpkgManifest(d, manifest_dir) 
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True) + self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True) + + self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or "" + if self._remove_old_rootfs(): + bb.utils.remove(self.image_rootfs, True) + self.pm = OpkgPM(d, + self.image_rootfs, + self.opkg_conf, + self.pkg_archs) + else: + self.pm = OpkgPM(d, + self.image_rootfs, + self.opkg_conf, + self.pkg_archs) + self.pm.recover_packaging_data() + + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True) + + def _prelink_file(self, root_dir, filename): + bb.note('prelink %s in %s' % (filename, root_dir)) + prelink_cfg = oe.path.join(root_dir, + self.d.expand('${sysconfdir}/prelink.conf')) + if not os.path.exists(prelink_cfg): + shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'), + prelink_cfg) + + cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink') + self._exec_shell_cmd([cmd_prelink, + '--root', + root_dir, + '-amR', + '-N', + '-c', + self.d.expand('${sysconfdir}/prelink.conf')]) + + ''' + Compare two files with the same key twice to see if they are equal. + If they are not equal, it means they are duplicated and come from + different packages. + 1st: Compare them directly; + 2nd: When incremental image creation is enabled, one of the + files could have been prelinked during a previous image + creation and changed as a result, so we need to + prelink the other one and compare them. + ''' + def _file_equal(self, key, f1, f2): + + # Both of them are not prelinked + if filecmp.cmp(f1, f2): + return True + + if self.image_rootfs not in f1: + self._prelink_file(f1.replace(key, ''), f1) + + if self.image_rootfs not in f2: + self._prelink_file(f2.replace(key, ''), f2) + + # Both of them are prelinked + if filecmp.cmp(f1, f2): + return True + + # Not equal + return False + + """ + This function was reused from the old implementation. + See commit: "image.bbclass: Added variables for multilib support." by + Lianhao Lu. 
+ """ + def _multilib_sanity_test(self, dirs): + + allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True) + if allow_replace is None: + allow_replace = "" + + allow_rep = re.compile(re.sub("\|$", "", allow_replace)) + error_prompt = "Multilib check error:" + + files = {} + for dir in dirs: + for root, subfolders, subfiles in os.walk(dir): + for file in subfiles: + item = os.path.join(root, file) + key = str(os.path.join("/", os.path.relpath(item, dir))) + + valid = True + if key in files: + #check whether the file is allow to replace + if allow_rep.match(key): + valid = True + else: + if os.path.exists(files[key]) and \ + os.path.exists(item) and \ + not self._file_equal(key, files[key], item): + valid = False + bb.fatal("%s duplicate files %s %s is not the same\n" % + (error_prompt, item, files[key])) + + #pass the check, add to list + if valid: + files[key] = item + + def _multilib_test_install(self, pkgs): + ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True) + bb.utils.mkdirhier(ml_temp) + + dirs = [self.image_rootfs] + + for variant in self.d.getVar("MULTILIB_VARIANTS", True).split(): + ml_target_rootfs = os.path.join(ml_temp, variant) + + bb.utils.remove(ml_target_rootfs, True) + + ml_opkg_conf = os.path.join(ml_temp, + variant + "-" + os.path.basename(self.opkg_conf)) + + ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs) + + ml_pm.update() + ml_pm.install(pkgs) + + dirs.append(ml_target_rootfs) + + self._multilib_sanity_test(dirs) + + ''' + While ipk incremental image generation is enabled, it will remove the + unneeded pkgs by comparing the old full manifest in previous existing + image and the new full manifest in the current image. + ''' + def _remove_extra_packages(self, pkgs_initial_install): + if self.inc_opkg_image_gen == "1": + # Parse full manifest in previous existing image creation session + old_full_manifest = self.manifest.parse_full_manifest() + + # Create full manifest for the current image session, the old one + # will be replaced by the new one. + self.manifest.create_full(self.pm) + + # Parse full manifest in current image creation session + new_full_manifest = self.manifest.parse_full_manifest() + + pkg_to_remove = list() + for pkg in old_full_manifest: + if pkg not in new_full_manifest: + pkg_to_remove.append(pkg) + + if pkg_to_remove != []: + bb.note('decremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + ''' + Compare with previous existing image creation, if some conditions + triggered, the previous old image should be removed. + The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS + and BAD_RECOMMENDATIONS' has been changed. 
+ ''' + def _remove_old_rootfs(self): + if self.inc_opkg_image_gen != "1": + return True + + vars_list_file = self.d.expand('${T}/vars_list') + + old_vars_list = "" + if os.path.exists(vars_list_file): + old_vars_list = open(vars_list_file, 'r+').read() + + new_vars_list = '%s:%s:%s\n' % \ + ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(), + (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(), + (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip()) + open(vars_list_file, 'w+').write(new_vars_list) + + if old_vars_list != new_vars_list: + return True + + return False + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True) + opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True) + rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True) + + # update PM index files, unless users provide their own feeds + if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1": + self.pm.write_index() + + execute_pre_post_process(self.d, opkg_pre_process_cmds) + + self.pm.update() + + self.pm.handle_bad_recommendations() + + if self.inc_opkg_image_gen == "1": + self._remove_extra_packages(pkgs_to_install) + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + # For multilib, we perform a sanity test before final install + # If sanity test fails, it will automatically do a bb.fatal() + # and the installation will stop + if pkg_type == Manifest.PKG_TYPE_MULTILIB: + self._multilib_test_install(pkgs_to_install[pkg_type]) + + self.pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + self.pm.install_complementary() + + execute_pre_post_process(self.d, opkg_post_process_cmds) + execute_pre_post_process(self.d, rootfs_post_install_cmds) + + if self.inc_opkg_image_gen == "1": + self.pm.backup_packaging_data() + + def _get_delayed_postinsts(self): + pkg_list = [] + status_file = os.path.join(self.image_rootfs, + self.d.getVar('OPKGLIBDIR', True).strip('/'), + "opkg", "status") + + with open(status_file) as status: + for line in status: + m_pkg = re.match("^Package: (.*)", line) + m_status = re.match("^Status:.*unpacked", line) + if m_pkg is not None: + pkg_name = m_pkg.group(1) + elif m_status is not None: + pkg_list.append(pkg_name) + + if len(pkg_list) == 0: + return None + + return pkg_list + + def _save_postinsts(self): + num = 0 + for p in self._get_delayed_postinsts(): + dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts") + src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info") + + bb.utils.mkdirhier(dst_postinst_dir) + + if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): + shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), + os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) + + num += 1 + + def _handle_intercept_failure(self, registered_pkgs): + self.pm.mark_packages("unpacked", registered_pkgs.split()) + + def _log_check(self): + pass + + def _cleanup(self): + pass + + +def create_rootfs(d, manifest_dir=None): + env_bkp = os.environ.copy() + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + RpmRootfs(d, manifest_dir).create() + elif img_type == "ipk": + OpkgRootfs(d, manifest_dir).create() + elif img_type == "deb": + DpkgRootfs(d, manifest_dir).create() + + os.environ.clear() + os.environ.update(env_bkp) + + +def image_list_installed_packages(d, format=None, rootfs_dir=None): + if 
not rootfs_dir: + rootfs_dir = d.getVar('IMAGE_ROOTFS', True) + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + return RpmPkgsList(d, rootfs_dir).list(format) + elif img_type == "ipk": + return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list(format) + elif img_type == "deb": + return DpkgPkgsList(d, rootfs_dir).list(format) + +if __name__ == "__main__": + """ + We should be able to run this as a standalone script, from outside the + bitbake environment. + """ + """ + TBD + """ diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py new file mode 100644 index 0000000000..564319965d --- /dev/null +++ b/meta/lib/oe/sdk.py @@ -0,0 +1,325 @@ +from abc import ABCMeta, abstractmethod +from oe.utils import execute_pre_post_process +from oe.manifest import * +from oe.package_manager import * +import os +import shutil +import glob + + +class Sdk(object): + __metaclass__ = ABCMeta + + def __init__(self, d, manifest_dir): + self.d = d + self.sdk_output = self.d.getVar('SDK_OUTPUT', True) + self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/') + self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/') + self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/') + + self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path) + self.sdk_host_sysroot = self.sdk_output + + if manifest_dir is None: + self.manifest_dir = self.d.getVar("SDK_DIR", True) + else: + self.manifest_dir = manifest_dir + + bb.utils.remove(self.sdk_output, True) + + self.install_order = Manifest.INSTALL_ORDER + + @abstractmethod + def _populate(self): + pass + + def populate(self): + bb.utils.mkdirhier(self.sdk_output) + + # call backend dependent implementation + self._populate() + + # Don't ship any libGL in the SDK + bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path, + self.d.getVar('libdir_nativesdk', True).strip('/'), + "libGL*")) + + # Remove broken .la files + bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path, + self.d.getVar('libdir_nativesdk', True).strip('/'), + "*.la")) + + # Link the ld.so.cache file into the host's filesystem + link_name = os.path.join(self.sdk_output, self.sdk_native_path, + self.sysconfdir, "ld.so.cache") + os.symlink("/etc/ld.so.cache", link_name) + + execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True)) + + +class RpmSdk(Sdk): + def __init__(self, d, manifest_dir=None): + super(RpmSdk, self).__init__(d, manifest_dir) + + self.target_manifest = RpmManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = RpmManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + target_providename = ['/bin/sh', + '/bin/bash', + '/usr/bin/env', + '/usr/bin/perl', + 'pkgconfig' + ] + + self.target_pm = RpmPM(d, + self.sdk_target_sysroot, + self.d.getVar('TARGET_VENDOR', True), + 'target', + target_providename + ) + + sdk_providename = ['/bin/sh', + '/bin/bash', + '/usr/bin/env', + '/usr/bin/perl', + 'pkgconfig', + 'libGL.so()(64bit)', + 'libGL.so' + ] + + self.host_pm = RpmPM(d, + self.sdk_host_sysroot, + self.d.getVar('SDK_VENDOR', True), + 'host', + sdk_providename, + "SDK_PACKAGE_ARCHS", + "SDK_OS" + ) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + pm.create_configs() + pm.write_index() + pm.dump_all_available_pkgs() + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type 
== Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True)) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True)) + + self.target_pm.remove_packaging_data() + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True)) + + self.host_pm.remove_packaging_data() + + # Move host RPM library data + native_rpm_state_dir = os.path.join(self.sdk_output, + self.sdk_native_path, + self.d.getVar('localstatedir_nativesdk', True).strip('/'), + "lib", + "rpm" + ) + bb.utils.mkdirhier(native_rpm_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, + "var", + "lib", + "rpm", + "*")): + bb.utils.movefile(f, native_rpm_state_dir) + + bb.utils.remove(os.path.join(self.sdk_output, "var"), True) + + # Move host sysconfig data + native_sysconf_dir = os.path.join(self.sdk_output, + self.sdk_native_path, + self.d.getVar('sysconfdir', + True).strip('/'), + ) + bb.utils.mkdirhier(native_sysconf_dir) + for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")): + bb.utils.movefile(f, native_sysconf_dir) + bb.utils.remove(os.path.join(self.sdk_output, "etc"), True) + + +class OpkgSdk(Sdk): + def __init__(self, d, manifest_dir=None): + super(OpkgSdk, self).__init__(d, manifest_dir) + + self.target_conf = self.d.getVar("IPKGCONF_TARGET", True) + self.host_conf = self.d.getVar("IPKGCONF_SDK", True) + + self.target_manifest = OpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = OpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf, + self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)) + + self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf, + self.d.getVar("SDK_PACKAGE_ARCHS", True)) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1": + pm.write_index() + + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True)) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True)) + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True)) + + target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir) + host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir) + + bb.utils.mkdirhier(target_sysconfdir) + shutil.copy(self.target_conf, target_sysconfdir) + os.chmod(os.path.join(target_sysconfdir, + os.path.basename(self.target_conf)), 0644) + + bb.utils.mkdirhier(host_sysconfdir) + shutil.copy(self.host_conf, host_sysconfdir) + os.chmod(os.path.join(host_sysconfdir, + os.path.basename(self.host_conf)), 0644) + + native_opkg_state_dir = 
os.path.join(self.sdk_output, self.sdk_native_path, + self.d.getVar('localstatedir_nativesdk', True).strip('/'), + "lib", "opkg") + bb.utils.mkdirhier(native_opkg_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")): + bb.utils.movefile(f, native_opkg_state_dir) + + bb.utils.remove(os.path.join(self.sdk_output, "var"), True) + + +class DpkgSdk(Sdk): + def __init__(self, d, manifest_dir=None): + super(DpkgSdk, self).__init__(d, manifest_dir) + + self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt") + self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk") + + self.target_manifest = DpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = DpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + self.target_pm = DpkgPM(d, self.sdk_target_sysroot, + self.d.getVar("PACKAGE_ARCHS", True), + self.d.getVar("DPKG_ARCH", True), + self.target_conf_dir) + + self.host_pm = DpkgPM(d, self.sdk_host_sysroot, + self.d.getVar("SDK_PACKAGE_ARCHS", True), + self.d.getVar("DEB_SDK_ARCH", True), + self.host_conf_dir) + + def _copy_apt_dir_to(self, dst_dir): + staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True) + + bb.utils.remove(dst_dir, True) + + shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + pm.write_index() + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True)) + + self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt")) + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True)) + + self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path, + "etc", "apt")) + + native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, + "var", "lib", "dpkg") + bb.utils.mkdirhier(native_dpkg_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")): + bb.utils.movefile(f, native_dpkg_state_dir) + + bb.utils.remove(os.path.join(self.sdk_output, "var"), True) + + +def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None): + if rootfs_dir is None: + sdk_output = d.getVar('SDK_OUTPUT', True) + target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/') + + rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True] + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + arch_var = ["SDK_PACKAGE_ARCHS", None][target is True] + os_var = ["SDK_OS", None][target is True] + return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list(format) + elif img_type == "ipk": + conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True] + return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list(format) + elif img_type == "deb": + return DpkgPkgsList(d, rootfs_dir).list(format) + +def populate_sdk(d, manifest_dir=None): + env_bkp = os.environ.copy() + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + 
RpmSdk(d, manifest_dir).populate() + elif img_type == "ipk": + OpkgSdk(d, manifest_dir).populate() + elif img_type == "deb": + DpkgSdk(d, manifest_dir).populate() + + os.environ.clear() + os.environ.update(env_bkp) + +if __name__ == "__main__": + pass diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py new file mode 100644 index 0000000000..aa25c3a10e --- /dev/null +++ b/meta/lib/oe/sstatesig.py @@ -0,0 +1,166 @@ +import bb.siggen + +def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache): + # Return True if we should keep the dependency, False to drop it + def isNative(x): + return x.endswith("-native") + def isCross(x): + return x.endswith("-cross") or x.endswith("-cross-initial") or x.endswith("-cross-intermediate") + def isNativeSDK(x): + return x.startswith("nativesdk-") + def isKernel(fn): + inherits = " ".join(dataCache.inherits[fn]) + return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1 + def isPackageGroup(fn): + inherits = " ".join(dataCache.inherits[fn]) + return "/packagegroup.bbclass" in inherits + def isImage(fn): + return "/image.bbclass" in " ".join(dataCache.inherits[fn]) + + # Always include our own inter-task dependencies + if recipename == depname: + return True + + # Quilt (patch application) changing isn't likely to affect anything + excludelist = ['quilt-native', 'subversion-native', 'git-native'] + if depname in excludelist and recipename != depname: + return False + + # Don't change native/cross/nativesdk recipe dependencies any further + if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename): + return True + + # Only target packages beyond here + + # packagegroups are assumed to have well-behaved names which don't change between architectures/tunes + if isPackageGroup(fn): + return False + + # Exclude well-defined machine-specific configurations which don't change ABI + if depname in siggen.abisaferecipes and not isImage(fn): + return False + + # Exclude well-defined recipe->dependency pairs + if "%s->%s" % (recipename, depname) in siggen.saferecipedeps: + return False + + # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum + # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum + # is machine specific. + # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes) + # and we recommend a kernel-module, we exclude the dependency. 
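+ # e.g. (illustrative, not from this patch): a recipe with + # RRECOMMENDS_${PN} = "kernel-module-ath9k" keeps stable task signatures across + # kernel version bumps, because the kernel dependency edge is dropped here + # instead of being hashed in.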
+ depfn = dep.rsplit(".", 1)[0] + if dataCache and isKernel(depfn) and not isKernel(fn): + for pkg in dataCache.runrecs[fn]: + if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1: + return False + + # Default to keep dependencies + return True + +class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic): + name = "OEBasic" + def init_rundepcheck(self, data): + self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split() + self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split() + pass + def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None): + return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache) + +class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash): + name = "OEBasicHash" + def init_rundepcheck(self, data): + self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split() + self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split() + pass + def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None): + return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache) + +# Insert these classes into siggen's namespace so it can see and select them +bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic +bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash + + +def find_siginfo(pn, taskname, taskhashlist, d): + """ Find signature data files for comparison purposes """ + + import fnmatch + import glob + + if taskhashlist: + hashfiles = {} + + if not taskname: + # We have to derive pn and taskname + key = pn + splitit = key.split('.bb.') + taskname = splitit[1] + pn = os.path.basename(splitit[0]).split('_')[0] + if key.startswith('virtual:native:'): + pn = pn + '-native' + + if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic']: + pn.replace("-native", "") + + filedates = {} + + # First search in stamps dir + localdata = d.createCopy() + localdata.setVar('MULTIMACH_TARGET_SYS', '*') + localdata.setVar('PN', pn) + localdata.setVar('PV', '*') + localdata.setVar('PR', '*') + localdata.setVar('EXTENDPE', '') + stamp = localdata.getVar('STAMP', True) + filespec = '%s.%s.sigdata.*' % (stamp, taskname) + foundall = False + import glob + for fullpath in glob.glob(filespec): + match = False + if taskhashlist: + for taskhash in taskhashlist: + if fullpath.endswith('.%s' % taskhash): + hashfiles[taskhash] = fullpath + if len(hashfiles) == len(taskhashlist): + foundall = True + break + else: + filedates[fullpath] = os.stat(fullpath).st_mtime + + if not taskhashlist or (len(filedates) < 2 and not foundall): + # That didn't work, look in sstate-cache + hashes = taskhashlist or ['*'] + localdata = bb.data.createCopy(d) + for hashval in hashes: + localdata.setVar('PACKAGE_ARCH', '*') + localdata.setVar('TARGET_VENDOR', '*') + localdata.setVar('TARGET_OS', '*') + localdata.setVar('PN', pn) + localdata.setVar('PV', '*') + localdata.setVar('PR', '*') + localdata.setVar('BB_TASKHASH', hashval) + if pn.endswith('-native') or pn.endswith('-crosssdk') or pn.endswith('-cross'): + localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/") + sstatename = taskname[3:] + filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename) + + if hashval != '*': + sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2]) + else: + sstatedir = d.getVar('SSTATE_DIR', True) + + for root, dirs, files in 
os.walk(sstatedir): + for fn in files: + fullpath = os.path.join(root, fn) + if fnmatch.fnmatch(fullpath, filespec): + if taskhashlist: + hashfiles[hashval] = fullpath + else: + filedates[fullpath] = os.stat(fullpath).st_mtime + + if taskhashlist: + return hashfiles + else: + return filedates + +bb.siggen.find_siginfo = find_siginfo diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py new file mode 100644 index 0000000000..a33abd733d --- /dev/null +++ b/meta/lib/oe/terminal.py @@ -0,0 +1,218 @@ +import logging +import oe.classutils +import shlex +from bb.process import Popen, ExecutionError + +logger = logging.getLogger('BitBake.OE.Terminal') + + +class UnsupportedTerminal(Exception): + pass + +class NoSupportedTerminals(Exception): + pass + + +class Registry(oe.classutils.ClassRegistry): + command = None + + def __init__(cls, name, bases, attrs): + super(Registry, cls).__init__(name.lower(), bases, attrs) + + @property + def implemented(cls): + return bool(cls.command) + + +class Terminal(Popen): + __metaclass__ = Registry + + def __init__(self, sh_cmd, title=None, env=None, d=None): + fmt_sh_cmd = self.format_command(sh_cmd, title) + try: + Popen.__init__(self, fmt_sh_cmd, env=env) + except OSError as exc: + import errno + if exc.errno == errno.ENOENT: + raise UnsupportedTerminal(self.name) + else: + raise + + def format_command(self, sh_cmd, title): + fmt = {'title': title or 'Terminal', 'command': sh_cmd} + if isinstance(self.command, basestring): + return shlex.split(self.command.format(**fmt)) + else: + return [element.format(**fmt) for element in self.command] + +class XTerminal(Terminal): + def __init__(self, sh_cmd, title=None, env=None, d=None): + Terminal.__init__(self, sh_cmd, title, env, d) + if not os.environ.get('DISPLAY'): + raise UnsupportedTerminal(self.name) + +class Gnome(XTerminal): + command = 'gnome-terminal -t "{title}" -x {command}' + priority = 2 + +class Mate(XTerminal): + command = 'mate-terminal -t "{title}" -x {command}' + priority = 2 + +class Xfce(XTerminal): + command = 'Terminal -T "{title}" -e "{command}"' + priority = 2 + + def __init__(self, command, title=None, env=None, d=None): + # Upstream binary name is Terminal but Debian/Ubuntu use + # xfce4-terminal to avoid possible(?) conflicts + distro = distro_name() + if distro == 'ubuntu' or distro == 'debian': + cmd = 'xfce4-terminal -T "{title}" -e "{command}"' + else: + cmd = command + XTerminal.__init__(self, cmd, title, env, d) + +class Konsole(XTerminal): + command = 'konsole -T "{title}" -e {command}' + priority = 2 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + # Check version + vernum = check_konsole_version("konsole") + if vernum: + if vernum.split('.')[0] == "2": + logger.debug(1, 'Konsole from KDE 4.x will not work as devshell, skipping') + raise UnsupportedTerminal(self.name) + XTerminal.__init__(self, sh_cmd, title, env, d) + +class XTerm(XTerminal): + command = 'xterm -T "{title}" -e {command}' + priority = 1 + +class Rxvt(XTerminal): + command = 'rxvt -T "{title}" -e {command}' + priority = 1 + +class Screen(Terminal): + command = 'screen -D -m -t "{title}" -S devshell {command}' + + def __init__(self, sh_cmd, title=None, env=None, d=None): + s_id = "devshell_%i" % os.getpid() + self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id + Terminal.__init__(self, sh_cmd, title, env, d) + msg = 'Screen started. 
Please connect in another terminal with ' \ + '"screen -r %s"' % s_id + if (d): + bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id, + 0.5, 10), d) + else: + logger.warn(msg) + +class TmuxRunning(Terminal): + """Open a new pane in the current running tmux window""" + name = 'tmux-running' + command = 'tmux split-window "{command}"' + priority = 2.75 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + if not bb.utils.which(os.getenv('PATH'), 'tmux'): + raise UnsupportedTerminal('tmux is not installed') + + if not os.getenv('TMUX'): + raise UnsupportedTerminal('tmux is not running') + + Terminal.__init__(self, sh_cmd, title, env, d) + +class Tmux(Terminal): + """Start a new tmux session and window""" + command = 'tmux new -d -s devshell -n devshell "{command}"' + priority = 0.75 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + if not bb.utils.which(os.getenv('PATH'), 'tmux'): + raise UnsupportedTerminal('tmux is not installed') + + # TODO: consider using a 'devshell' session shared amongst all + # devshells, if it's already there, add a new window to it. + window_name = 'devshell-%i' % os.getpid() + + self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name) + Terminal.__init__(self, sh_cmd, title, env, d) + + attach_cmd = 'tmux att -t {0}'.format(window_name) + msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name) + if d: + bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d) + else: + logger.warn(msg) + +class Custom(Terminal): + command = 'false' # This is a placeholder + priority = 3 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True) + if self.command: + if not '{command}' in self.command: + self.command += ' {command}' + Terminal.__init__(self, sh_cmd, title, env, d) + logger.warn('Custom terminal was started.') + else: + logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set') + raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set') + + +def prioritized(): + return Registry.prioritized() + +def spawn_preferred(sh_cmd, title=None, env=None, d=None): + """Spawn the first supported terminal, by priority""" + for terminal in prioritized(): + try: + spawn(terminal.name, sh_cmd, title, env, d) + break + except UnsupportedTerminal: + continue + else: + raise NoSupportedTerminals() + +def spawn(name, sh_cmd, title=None, env=None, d=None): + """Spawn the specified terminal, by name""" + logger.debug(1, 'Attempting to spawn terminal "%s"', name) + try: + terminal = Registry.registry[name] + except KeyError: + raise UnsupportedTerminal(name) + + pipe = terminal(sh_cmd, title, env, d) + output = pipe.communicate()[0] + if pipe.returncode != 0: + raise ExecutionError(sh_cmd, pipe.returncode, output) + +def check_konsole_version(konsole): + import subprocess as sub + try: + p = sub.Popen(['sh', '-c', '%s --version' % konsole],stdout=sub.PIPE,stderr=sub.PIPE) + out, err = p.communicate() + ver_info = out.rstrip().split('\n') + except OSError as exc: + import errno + if exc.errno == errno.ENOENT: + return None + else: + raise + vernum = None + for ver in ver_info: + if ver.startswith('Konsole'): + vernum = ver.split(' ')[-1] + return vernum + +def distro_name(): + try: + p = Popen(['lsb_release', '-i']) + out, err = p.communicate() + distro = out.split(':')[1].strip().lower() + except: + distro = "unknown" + return distro diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py new file 
mode 100644 index 0000000000..e69de29bb2 diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py new file mode 100644 index 0000000000..c388886184 --- /dev/null +++ b/meta/lib/oe/tests/test_license.py @@ -0,0 +1,68 @@ +import unittest +import oe.license + +class SeenVisitor(oe.license.LicenseVisitor): + def __init__(self): + self.seen = [] + oe.license.LicenseVisitor.__init__(self) + + def visit_Str(self, node): + self.seen.append(node.s) + +class TestSingleLicense(unittest.TestCase): + licenses = [ + "GPLv2", + "LGPL-2.0", + "Artistic", + "MIT", + "GPLv3+", + "FOO_BAR", + ] + invalid_licenses = ["GPL/BSD"] + + @staticmethod + def parse(licensestr): + visitor = SeenVisitor() + visitor.visit_string(licensestr) + return visitor.seen + + def test_single_licenses(self): + for license in self.licenses: + licenses = self.parse(license) + self.assertListEqual(licenses, [license]) + + def test_invalid_licenses(self): + for license in self.invalid_licenses: + with self.assertRaises(oe.license.InvalidLicense) as cm: + self.parse(license) + self.assertEqual(cm.exception.license, license) + +class TestSimpleCombinations(unittest.TestCase): + tests = { + "FOO&BAR": ["FOO", "BAR"], + "BAZ & MOO": ["BAZ", "MOO"], + "ALPHA|BETA": ["ALPHA"], + "BAZ&MOO|FOO": ["FOO"], + "FOO&BAR|BAZ": ["FOO", "BAR"], + } + preferred = ["ALPHA", "FOO", "BAR"] + + def test_tests(self): + def choose(a, b): + if all(lic in self.preferred for lic in b): + return b + else: + return a + + for license, expected in self.tests.items(): + licenses = oe.license.flattened_licenses(license, choose) + self.assertListEqual(licenses, expected) + +class TestComplexCombinations(TestSimpleCombinations): + tests = { + "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"], + "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"], + "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"], + "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"], + } + preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"] diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py new file mode 100644 index 0000000000..3d41ce157a --- /dev/null +++ b/meta/lib/oe/tests/test_path.py @@ -0,0 +1,89 @@ +import unittest +import oe, oe.path +import tempfile +import os +import errno +import shutil + +class TestRealPath(unittest.TestCase): + DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ] + FILES = [ "etc/passwd", "b/file" ] + LINKS = [ + ( "bin", "/usr/bin", "/usr/bin" ), + ( "binX", "usr/binX", "/usr/binX" ), + ( "c", "broken", "/broken" ), + ( "etc/passwd-1", "passwd", "/etc/passwd" ), + ( "etc/passwd-2", "passwd-1", "/etc/passwd" ), + ( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ), + ( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ), + ( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ), + ( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ), + ( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ), + ( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ), + ( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ), + ( "usr/binX/prog-E", "../sbin/prog-E", None ), + ( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ), + ( "loop", "a/loop", None ), + ( "a/loop", "../loop", None ), + ( "b/test", "file/foo", "/b/file/foo" ), + ] + + LINKS_PHYS = [ + ( "./", "/", "" ), + ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ), + ] + + EXCEPTIONS = [ + ( "loop", errno.ELOOP ), + ( "b/test", errno.ENOENT ), + ] + + def __del__(self): + try: + #os.system("tree -F %s" % self.tmpdir) + 
shutil.rmtree(self.tmpdir) + except: + pass + + def setUp(self): + self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path") + self.root = os.path.join(self.tmpdir, "R") + + os.mkdir(os.path.join(self.tmpdir, "_real")) + os.symlink("_real", self.root) + + for d in self.DIRS: + os.mkdir(os.path.join(self.root, d)) + for f in self.FILES: + file(os.path.join(self.root, f), "w") + for l in self.LINKS: + os.symlink(l[1], os.path.join(self.root, l[0])) + + def __realpath(self, file, use_physdir, assume_dir = True): + return oe.path.realpath(os.path.join(self.root, file), self.root, + use_physdir, assume_dir = assume_dir) + + def test_norm(self): + for l in self.LINKS: + if l[2] == None: + continue + + target_p = self.__realpath(l[0], True) + target_l = self.__realpath(l[0], False) + + if l[2] != False: + self.assertEqual(target_p, target_l) + self.assertEqual(l[2], target_p[len(self.root):]) + + def test_phys(self): + for l in self.LINKS_PHYS: + target_p = self.__realpath(l[0], True) + target_l = self.__realpath(l[0], False) + + self.assertEqual(l[1], target_p[len(self.root):]) + self.assertEqual(l[2], target_l[len(self.root):]) + + def test_loop(self): + for e in self.EXCEPTIONS: + self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1], + self.__realpath, e[0], False, False) diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py new file mode 100644 index 0000000000..367cc30e45 --- /dev/null +++ b/meta/lib/oe/tests/test_types.py @@ -0,0 +1,62 @@ +import unittest +from oe.maketype import create, factory + +class TestTypes(unittest.TestCase): + def assertIsInstance(self, obj, cls): + return self.assertTrue(isinstance(obj, cls)) + + def assertIsNot(self, obj, other): + return self.assertFalse(obj is other) + + def assertFactoryCreated(self, value, type, **flags): + cls = factory(type) + self.assertIsNot(cls, None) + self.assertIsInstance(create(value, type, **flags), cls) + +class TestBooleanType(TestTypes): + def test_invalid(self): + self.assertRaises(ValueError, create, '', 'boolean') + self.assertRaises(ValueError, create, 'foo', 'boolean') + self.assertRaises(TypeError, create, object(), 'boolean') + + def test_true(self): + self.assertTrue(create('y', 'boolean')) + self.assertTrue(create('yes', 'boolean')) + self.assertTrue(create('1', 'boolean')) + self.assertTrue(create('t', 'boolean')) + self.assertTrue(create('true', 'boolean')) + self.assertTrue(create('TRUE', 'boolean')) + self.assertTrue(create('truE', 'boolean')) + + def test_false(self): + self.assertFalse(create('n', 'boolean')) + self.assertFalse(create('no', 'boolean')) + self.assertFalse(create('0', 'boolean')) + self.assertFalse(create('f', 'boolean')) + self.assertFalse(create('false', 'boolean')) + self.assertFalse(create('FALSE', 'boolean')) + self.assertFalse(create('faLse', 'boolean')) + + def test_bool_equality(self): + self.assertEqual(create('n', 'boolean'), False) + self.assertNotEqual(create('n', 'boolean'), True) + self.assertEqual(create('y', 'boolean'), True) + self.assertNotEqual(create('y', 'boolean'), False) + +class TestList(TestTypes): + def assertListEqual(self, value, valid, sep=None): + obj = create(value, 'list', separator=sep) + self.assertEqual(obj, valid) + if sep is not None: + self.assertEqual(obj.separator, sep) + self.assertEqual(str(obj), obj.separator.join(obj)) + + def test_list_nosep(self): + testlist = ['alpha', 'beta', 'theta'] + self.assertListEqual('alpha beta theta', testlist) + self.assertListEqual('alpha beta\ttheta', testlist) + self.assertListEqual('alpha', 
['alpha']) + + def test_list_usersep(self): + self.assertListEqual('foo:bar', ['foo', 'bar'], ':') + self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':') diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py new file mode 100644 index 0000000000..5d9ac52e7d --- /dev/null +++ b/meta/lib/oe/tests/test_utils.py @@ -0,0 +1,51 @@ +import unittest +from oe.utils import packages_filter_out_system + +class TestPackagesFilterOutSystem(unittest.TestCase): + def test_filter(self): + """ + Test that oe.utils.packages_filter_out_system works. + """ + try: + import bb + except ImportError: + self.skipTest("Cannot import bb") + + d = bb.data_smart.DataSmart() + d.setVar("PN", "foo") + + d.setVar("PACKAGES", "foo foo-doc foo-dev") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, []) + + d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, ["foo-data"]) + + d.setVar("PACKAGES", "foo foo-locale-en-gb") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, []) + + d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, ["foo-data"]) + + +class TestTrimVersion(unittest.TestCase): + def test_version_exception(self): + with self.assertRaises(TypeError): + trim_version(None, 2) + with self.assertRaises(TypeError): + trim_version((1, 2, 3), 2) + + def test_num_exception(self): + with self.assertRaises(ValueError): + trim_version("1.2.3", 0) + with self.assertRaises(ValueError): + trim_version("1.2.3", -1) + + def test_valid(self): + self.assertEqual(trim_version("1.2.3", 1), "1") + self.assertEqual(trim_version("1.2.3", 2), "1.2") + self.assertEqual(trim_version("1.2.3", 3), "1.2.3") + self.assertEqual(trim_version("1.2.3", 4), "1.2.3") diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py new file mode 100644 index 0000000000..7f47c17d0e --- /dev/null +++ b/meta/lib/oe/types.py @@ -0,0 +1,153 @@ +import errno +import re +import os + + +class OEList(list): + """OpenEmbedded 'list' type + + Acts as an ordinary list, but is constructed from a string value and a + separator (optional), and re-joins itself when converted to a string with + str(). Set the variable type flag to 'list' to use this type, and the + 'separator' flag may be specified (defaulting to whitespace).""" + + name = "list" + + def __init__(self, value, separator = None): + if value is not None: + list.__init__(self, value.split(separator)) + else: + list.__init__(self) + + if separator is None: + self.separator = " " + else: + self.separator = separator + + def __str__(self): + return self.separator.join(self) + +def choice(value, choices): + """OpenEmbedded 'choice' type + + Acts as a multiple choice for the user. To use this, set the variable + type flag to 'choice', and set the 'choices' flag to a space separated + list of valid values.""" + if not isinstance(value, basestring): + raise TypeError("choice accepts a string, not '%s'" % type(value)) + + value = value.lower() + choices = choices.lower() + if value not in choices.split(): + raise ValueError("Invalid choice '%s'. 
Valid choices: %s" % + (value, choices)) + return value + +class NoMatch(object): + """Stub python regex pattern object which never matches anything""" + def findall(self, string, flags=0): + return None + + def finditer(self, string, flags=0): + return None + + def match(self, flags=0): + return None + + def search(self, string, flags=0): + return None + + def split(self, string, maxsplit=0): + return None + + def sub(pattern, repl, string, count=0): + return None + + def subn(pattern, repl, string, count=0): + return None + +NoMatch = NoMatch() + +def regex(value, regexflags=None): + """OpenEmbedded 'regex' type + + Acts as a regular expression, returning the pre-compiled regular + expression pattern object. To use this type, set the variable type flag + to 'regex', and optionally, set the 'regexflags' type to a space separated + list of the flags to control the regular expression matching (e.g. + FOO[regexflags] += 'ignorecase'). See the python documentation on the + 're' module for a list of valid flags.""" + + flagval = 0 + if regexflags: + for flag in regexflags.split(): + flag = flag.upper() + try: + flagval |= getattr(re, flag) + except AttributeError: + raise ValueError("Invalid regex flag '%s'" % flag) + + if not value: + # Let's ensure that the default behavior for an undefined or empty + # variable is to match nothing. If the user explicitly wants to match + # anything, they can match '.*' instead. + return NoMatch + + try: + return re.compile(value, flagval) + except re.error as exc: + raise ValueError("Invalid regex value '%s': %s" % + (value, exc.args[0])) + +def boolean(value): + """OpenEmbedded 'boolean' type + + Valid values for true: 'yes', 'y', 'true', 't', '1' + Valid values for false: 'no', 'n', 'false', 'f', '0' + """ + + if not isinstance(value, basestring): + raise TypeError("boolean accepts a string, not '%s'" % type(value)) + + value = value.lower() + if value in ('yes', 'y', 'true', 't', '1'): + return True + elif value in ('no', 'n', 'false', 'f', '0'): + return False + raise ValueError("Invalid boolean value '%s'" % value) + +def integer(value, numberbase=10): + """OpenEmbedded 'integer' type + + Defaults to base 10, but this can be specified using the optional + 'numberbase' flag.""" + + return int(value, int(numberbase)) + +_float = float +def float(value, fromhex='false'): + """OpenEmbedded floating point type + + To use this type, set the type flag to 'float', and optionally set the + 'fromhex' flag to a true value (obeying the same rules as for the + 'boolean' type) if the value is in base 16 rather than base 10.""" + + if boolean(fromhex): + return _float.fromhex(value) + else: + return _float(value) + +def path(value, relativeto='', normalize='true', mustexist='false'): + value = os.path.join(relativeto, value) + + if boolean(normalize): + value = os.path.normpath(value) + + if boolean(mustexist): + try: + open(value, 'r') + except IOError as exc: + if exc.errno == errno.ENOENT: + raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT))) + + return value diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py new file mode 100644 index 0000000000..defa53679b --- /dev/null +++ b/meta/lib/oe/utils.py @@ -0,0 +1,166 @@ +try: + # Python 2 + import commands as cmdstatus +except ImportError: + # Python 3 + import subprocess as cmdstatus + +def read_file(filename): + try: + f = open( filename, "r" ) + except IOError as reason: + return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. 
:M: + else: + data = f.read().strip() + f.close() + return data + +def ifelse(condition, iftrue = True, iffalse = False): + if condition: + return iftrue + else: + return iffalse + +def conditional(variable, checkvalue, truevalue, falsevalue, d): + if d.getVar(variable,1) == checkvalue: + return truevalue + else: + return falsevalue + +def less_or_equal(variable, checkvalue, truevalue, falsevalue, d): + if float(d.getVar(variable,1)) <= float(checkvalue): + return truevalue + else: + return falsevalue + +def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d): + result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue) + if result <= 0: + return truevalue + else: + return falsevalue + +def contains(variable, checkvalues, truevalue, falsevalue, d): + val = d.getVar(variable, True) + if not val: + return falsevalue + val = set(val.split()) + if isinstance(checkvalues, basestring): + checkvalues = set(checkvalues.split()) + else: + checkvalues = set(checkvalues) + if checkvalues.issubset(val): + return truevalue + return falsevalue + +def both_contain(variable1, variable2, checkvalue, d): + if d.getVar(variable1,1).find(checkvalue) != -1 and d.getVar(variable2,1).find(checkvalue) != -1: + return checkvalue + else: + return "" + +def prune_suffix(var, suffixes, d): + # See if var ends with any of the suffixes listed and + # remove it if found + for suffix in suffixes: + if var.endswith(suffix): + var = var.replace(suffix, "") + + prefix = d.getVar("MLPREFIX", True) + if prefix and var.startswith(prefix): + var = var.replace(prefix, "") + + return var + +def str_filter(f, str, d): + from re import match + return " ".join(filter(lambda x: match(f, x, 0), str.split())) + +def str_filter_out(f, str, d): + from re import match + return " ".join(filter(lambda x: not match(f, x, 0), str.split())) + +def param_bool(cfg, field, dflt = None): + """Look up <field> in the <cfg> map and convert it to a boolean; take + <dflt> when the field does not exist""" + value = cfg.get(field, dflt) + strvalue = str(value).lower() + if strvalue in ('yes', 'y', 'true', 't', '1'): + return True + elif strvalue in ('no', 'n', 'false', 'f', '0'): + return False + raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value)) + +def inherits(d, *classes): + """Return True if the metadata inherits any of the specified classes""" + return any(bb.data.inherits_class(cls, d) for cls in classes) + +def features_backfill(var,d): + # This construct allows the addition of new features to the variable specified + # as var + # Example for var = "DISTRO_FEATURES" + # This construct allows the addition of new features to DISTRO_FEATURES + # that if not present would disable existing functionality, without + # disturbing distributions that have already set DISTRO_FEATURES. + # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should + # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED + features = (d.getVar(var, True) or "").split() + backfill = (d.getVar(var+"_BACKFILL", True) or "").split() + considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split() + + addfeatures = [] + for feature in backfill: + if feature not in features and feature not in considered: + addfeatures.append(feature) + + if addfeatures: + d.appendVar(var, " " + " ".join(addfeatures)) + +
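+# Illustrative sketch (variable values are assumptions, not part of this patch): with +# DISTRO_FEATURES = "alsa argp" and DISTRO_FEATURES_BACKFILL = "pulseaudio", +# features_backfill("DISTRO_FEATURES", d) appends " pulseaudio"; a distro that does +# not want the feature lists it in DISTRO_FEATURES_BACKFILL_CONSIDERED instead. +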
+ """ + pn = d.getVar('PN', True) + blacklist = map(lambda suffix: pn + suffix, ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')) + localepkg = pn + "-locale-" + pkgs = [] + + for pkg in d.getVar('PACKAGES', True).split(): + if pkg not in blacklist and localepkg not in pkg: + pkgs.append(pkg) + return pkgs + +def getstatusoutput(cmd): + return cmdstatus.getstatusoutput(cmd) + + +def trim_version(version, num_parts=2): + """ + Return just the first of , split by periods. For + example, trim_version("1.2.3", 2) will return "1.2". + """ + if type(version) is not str: + raise TypeError("Version should be a string") + if num_parts < 1: + raise ValueError("Cannot split to parts < 1") + + parts = version.split(".") + trimmed = ".".join(parts[:num_parts]) + return trimmed + +def cpu_count(): + import multiprocessing + return multiprocessing.cpu_count() + +def execute_pre_post_process(d, cmds): + if cmds is None: + return + + for cmd in cmds.strip().split(';'): + cmd = cmd.strip() + if cmd != '': + bb.note("Executing %s ..." % cmd) + bb.build.exec_func(cmd, d) diff --git a/meta/lib/oeqa/__init__.py b/meta/lib/oeqa/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/meta/lib/oeqa/controllers/__init__.py b/meta/lib/oeqa/controllers/__init__.py new file mode 100644 index 0000000000..8eda92763c --- /dev/null +++ b/meta/lib/oeqa/controllers/__init__.py @@ -0,0 +1,3 @@ +# Enable other layers to have modules in the same named directory +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/controllers/masterimage.py b/meta/lib/oeqa/controllers/masterimage.py new file mode 100644 index 0000000000..188c630bcd --- /dev/null +++ b/meta/lib/oeqa/controllers/masterimage.py @@ -0,0 +1,133 @@ +import os +import bb +import traceback +import time + +import oeqa.targetcontrol +import oeqa.utils.sshcontrol as sshcontrol +import oeqa.utils.commands as commands + +class GummibootTarget(oeqa.targetcontrol.SimpleRemoteTarget): + + def __init__(self, d): + # let our base class do the ip thing + super(GummibootTarget, self).__init__(d) + + # test rootfs + kernel + self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.tar.gz') + self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("KERNEL_IMAGETYPE")) + if not os.path.isfile(self.rootfs): + # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be + # the same as the config with which the image was build, ie + # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz" + # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage + bb.fatal("No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \ + \nExpected path: %s" % self.rootfs) + if not os.path.isfile(self.kernel): + bb.fatal("No kernel found. Expected path: %s" % self.kernel) + + # if the user knows what he's doing, then by all means... 
+ # test-rootfs.tar.gz and test-kernel are hardcoded names in other places + # they really have to be used like that in commands though + cmds = d.getVar("TEST_DEPLOY_CMDS", True) + + # this is the value we need to set in the LoaderEntryOneShot EFI variable + # so the system boots the 'test' bootloader label and not the default + # The first four bytes are EFI bits, and the rest is a utf-16le string + # (EFI var values need to be utf-16) + # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C + # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...| + self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00' + + if cmds: + self.deploy_cmds = cmds.split("\n") + else: + self.deploy_cmds = [ + 'mount -L boot /boot', + 'mkdir -p /mnt/testrootfs', + 'mount -L testrootfs /mnt/testrootfs', + 'modprobe efivarfs', + 'mount -t efivarfs efivarfs /sys/firmware/efi/efivars', + 'cp ~/test-kernel /boot', + 'rm -rf /mnt/testrootfs/*', + 'tar xzvf ~/test-rootfs.tar.gz -C /mnt/testrootfs', + 'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue + ] + + # master ssh connection + self.master = None + + # this is the name of the command that controls the power for a board + # e.g.: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants" + # the command should take "off", "on" and "cycle" (off, then on) as its last argument + self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None + self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or "" + self.origenv = os.environ + if self.powercontrol_cmd: + if self.powercontrol_args: + self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args) + # the external script for controlling power might use ssh + # ssh + keys means we need the original user env + bborigenv = d.getVar("BB_ORIGENV", False) or {} + for key in bborigenv: + val = bborigenv.getVar(key, True) + if val is not None: + self.origenv[key] = str(val) + self.power_ctl("on") + + def power_ctl(self, msg): + if self.powercontrol_cmd: + cmd = "%s %s" % (self.powercontrol_cmd, msg) + commands.runCmd(cmd, preexec_fn=os.setsid, env=self.origenv) + + def power_cycle(self, conn): + if self.powercontrol_cmd: + # be nice, don't just cut power + conn.run("shutdown -h now") + time.sleep(10) + self.power_ctl("cycle") + else: + status, output = conn.run("reboot") + if status != 0: + bb.error("Failed rebooting target and no power control command defined.
You need to manually reset the device.\n%s" % output) + + def deploy(self): + bb.plain("%s - deploying image on target" % self.pn) + # base class just sets the ssh log file for us + super(GummibootTarget, self).deploy() + self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port) + try: + self._deploy() + except Exception as e: + bb.fatal("Failed deploying test image: %s" % e) + + def _deploy(self): + # make sure we are in the right image + status, output = self.master.run("cat /etc/masterimage") + if status != 0: + raise Exception("No ssh connectivity or target isn't running a master image.\n%s" % output) + + # make sure these aren't mounted + self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;") + + # from now on, every deploy cmd should return 0 + # else an exception will be thrown by sshcontrol + self.master.ignore_status = False + self.master.copy_to(self.rootfs, "~/test-rootfs.tar.gz") + self.master.copy_to(self.kernel, "~/test-kernel") + for cmd in self.deploy_cmds: + self.master.run(cmd) + + + def start(self, params=None): + bb.plain("%s - boot test image on target" % self.pn) + self.power_cycle(self.master) + # there are better ways than a timeout but this should work for now + time.sleep(120) + # set the ssh object for the target/test image + self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port) + bb.plain("%s - start running tests" % self.pn) + + def stop(self): + bb.plain("%s - reboot/powercycle target" % self.pn) + self.power_cycle(self.connection) diff --git a/meta/lib/oeqa/controllers/testtargetloader.py b/meta/lib/oeqa/controllers/testtargetloader.py new file mode 100644 index 0000000000..019bbfd840 --- /dev/null +++ b/meta/lib/oeqa/controllers/testtargetloader.py @@ -0,0 +1,69 @@ +import types +import bb + +# This class is responsible for loading a test target controller +class TestTargetLoader: + + # Search oeqa.controllers module directory for and return a controller + # corresponding to the given target name. + # AttributeError raised if not found. + # ImportError raised if a provided module can not be imported. + def get_controller_module(self, target, bbpath): + controllerslist = self.get_controller_modulenames(bbpath) + bb.note("Available controller modules: %s" % str(controllerslist)) + controller = self.load_controller_from_name(target, controllerslist) + return controller + + # Return a list of all python modules in lib/oeqa/controllers for each + # layer in bbpath + def get_controller_modulenames(self, bbpath): + + controllerslist = [] + + def add_controller_list(path): + if not os.path.exists(os.path.join(path, '__init__.py')): + bb.fatal('Controllers directory %s exists but is missing __init__.py' % path) + files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')]) + for f in files: + module = 'oeqa.controllers.' + f[:-3] + if module not in controllerslist: + controllerslist.append(module) + else: + bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module) + + for p in bbpath: + controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers') + bb.debug(2, 'Searching for target controllers in %s' % controllerpath) + if os.path.exists(controllerpath): + add_controller_list(controllerpath) + return controllerslist + + # Search for and return a controller from given target name and + # set of module names. + # Raise AttributeError if not found. 
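# (Editorial sketch, not part of the original patch: a hypothetical caller
# with TEST_TARGET = "GummibootTarget" would do roughly
#   controller = TestTargetLoader().get_controller_module("GummibootTarget", bbpath)
# where bbpath is the usual list of layer paths; this imports
# oeqa.controllers.masterimage from one of the layers and returns the class.)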
+ # Raise ImportError if a provided module cannot be imported + def load_controller_from_name(self, target, modulenames): + for name in modulenames: + obj = self.load_controller_from_module(target, name) + if obj: + return obj + raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames))) + + # Search for and return a controller or None from given module name + def load_controller_from_module(self, target, modulename): + obj = None + # import module, allowing it to raise import exception + module = __import__(modulename, globals(), locals(), [target]) + # look for target class in the module, catching any exceptions as it + # is valid that a module may not have the target class. + try: + obj = getattr(module, target) + if obj: + from oeqa.targetcontrol import BaseTarget + if (not isinstance(obj, (type, types.ClassType))): + bb.warn("Target {0} found, but is not a class".format(target)) + if( not issubclass(obj, BaseTarget)): + bb.warn("Target {0} found, but is not a subclass of BaseTarget".format(target)) + except: + obj = None + return obj diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py new file mode 100644 index 0000000000..0db6cb80a9 --- /dev/null +++ b/meta/lib/oeqa/oetest.py @@ -0,0 +1,107 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# Main unittest module used by testimage.bbclass +# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime. + +# It also has some helper functions and it's responsible for actually starting the tests + +import os, re, mmap +import unittest +import inspect + + +def loadTests(tc): + + # set the context object passed from the test class + setattr(oeTest, "tc", tc) + # set ps command to use + setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeTest.hasPackage("procps") else "ps") + # prepare test suite, loader and runner + suite = unittest.TestSuite() + testloader = unittest.TestLoader() + testloader.sortTestMethodsUsing = None + suite = testloader.loadTestsFromNames(tc.testslist) + + return suite + +def runTests(tc): + + suite = loadTests(tc) + print("Test modules %s" % tc.testslist) + print("Found %s tests" % suite.countTestCases()) + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + + return result + + +class oeTest(unittest.TestCase): + + longMessage = True + testFailures = [] + testSkipped = [] + testErrors = [] + + def run(self, result=None): + super(oeTest, self).run(result) + + # we add the results to our own lists, we use those for decorators + if len(result.failures) > len(oeTest.testFailures): + oeTest.testFailures.append(str(result.failures[-1][0]).split()[0]) + if len(result.skipped) > len(oeTest.testSkipped): + oeTest.testSkipped.append(str(result.skipped[-1][0]).split()[0]) + if len(result.errors) > len(oeTest.testErrors): + oeTest.testErrors.append(str(result.errors[-1][0]).split()[0]) + + @classmethod + def hasPackage(self, pkg): + + if re.search(pkg, oeTest.tc.pkgmanifest): + return True + return False + + @classmethod + def hasFeature(self,feature): + + if feature in oeTest.tc.imagefeatures or \ + feature in oeTest.tc.distrofeatures: + return True + else: + return False + + +class oeRuntimeTest(oeTest): + + def __init__(self, methodName='runTest'): + self.target = oeRuntimeTest.tc.target + super(oeRuntimeTest, self).__init__(methodName) + + +def getmodule(pos=2): + # stack returns a list of tuples containing frame information + # First element of the list is the
current frame, the caller is at index 1 + frameinfo = inspect.stack()[pos] + modname = inspect.getmodulename(frameinfo[1]) + #modname = inspect.getmodule(frameinfo[0]).__name__ + return modname + +def skipModule(reason, pos=2): + modname = getmodule(pos) + if modname not in oeTest.tc.testsrequired: + raise unittest.SkipTest("%s: %s" % (modname, reason)) + else: + raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \ "\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \ "\nor the image really doesn't have the required feature/package when it should." % (modname, reason)) + +def skipModuleIf(cond, reason): + + if cond: + skipModule(reason, 3) + +def skipModuleUnless(cond, reason): + + if not cond: + skipModule(reason, 3) diff --git a/meta/lib/oeqa/runexported.py b/meta/lib/oeqa/runexported.py new file mode 100755 index 0000000000..e1b6642ec2 --- /dev/null +++ b/meta/lib/oeqa/runexported.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python + + +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# This script should be used outside of the build system to run image tests. +# It needs a json file as input as exported by the build. +# E.g. for an already built image: +#- export the tests: +# TEST_EXPORT_ONLY = "1" +# TEST_TARGET = "simpleremote" +# TEST_TARGET_IP = "192.168.7.2" +# TEST_SERVER_IP = "192.168.7.1" +# bitbake core-image-sato -c testimage +# Set up your target, e.g. for qemu: runqemu core-image-sato +# cd build/tmp/testimage/core-image-sato +# ./runexported.py testdata.json + +import sys +import os +import time +from optparse import OptionParser + +try: + import simplejson as json +except ImportError: + import json + +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa"))) + +from oeqa.oetest import runTests +from oeqa.utils.sshcontrol import SSHControl + +# this isn't pretty but we need a fake target object +# for running the tests externally, as we don't care +# about deploy/start; we only care about the connection methods (run, copy) +class FakeTarget(object): + def __init__(self, d): + self.connection = None + self.ip = None + self.server_ip = None + self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime()) + self.testdir = d.getVar("TEST_LOG_DIR", True) + self.pn = d.getVar("PN", True) + + def exportStart(self): + self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime) + sshloglink = os.path.join(self.testdir, "ssh_target_log") + if os.path.islink(sshloglink): + os.unlink(sshloglink) + os.symlink(self.sshlog, sshloglink) + print("SSH log file: %s" % self.sshlog) + self.connection = SSHControl(self.ip, logfile=self.sshlog) + + def run(self, cmd, timeout=None): + return self.connection.run(cmd, timeout) + + def copy_to(self, localpath, remotepath): + return self.connection.copy_to(localpath, remotepath) + + def copy_from(self, remotepath, localpath): + return self.connection.copy_from(remotepath, localpath) + + +class MyDataDict(dict): + def getVar(self, key, unused = None): + return self.get(key, "") + +class TestContext(object): + def __init__(self): + self.d = None + self.target = None + +def main(): + + usage = "usage: %prog [options] <json file>" + parser = OptionParser(usage=usage) + parser.add_option("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \ override the value determined from TEST_TARGET_IP at build time") + parser.add_option("-s", "--server-ip", dest="server_ip", help="The IP address of this machine.
Use this to \ override the value determined from TEST_SERVER_IP at build time.") + parser.add_option("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that is, \ the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \ specified in the json if that directory actually exists or it will error out.") + parser.add_option("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \ the current dir is used. It is usually used for creating an ssh log file and an scp test file.") + + (options, args) = parser.parse_args() + if len(args) != 1: + parser.error("Incorrect number of arguments. The one and only argument should be a json file exported by the build system") + + with open(args[0], "r") as f: + loaded = json.load(f) + + if options.ip: + loaded["target"]["ip"] = options.ip + if options.server_ip: + loaded["target"]["server_ip"] = options.server_ip + + d = MyDataDict() + for key in loaded["d"].keys(): + d[key] = loaded["d"][key] + + if options.log_dir: + d["TEST_LOG_DIR"] = options.log_dir + else: + d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__)) + if options.deploy_dir: + d["DEPLOY_DIR"] = options.deploy_dir + else: + if not os.path.isdir(d["DEPLOY_DIR"]): + raise Exception("The path to DEPLOY_DIR does not exist: %s" % d["DEPLOY_DIR"]) + + + target = FakeTarget(d) + for key in loaded["target"].keys(): + setattr(target, key, loaded["target"][key]) + + tc = TestContext() + setattr(tc, "d", d) + setattr(tc, "target", target) + for key in loaded.keys(): + if key != "d" and key != "target": + setattr(tc, key, loaded[key]) + + target.exportStart() + runTests(tc) + + return 0 + +if __name__ == "__main__": + try: + ret = main() + except Exception: + ret = 1 + import traceback + traceback.print_exc(5) + sys.exit(ret) diff --git a/meta/lib/oeqa/runtime/__init__.py b/meta/lib/oeqa/runtime/__init__.py new file mode 100644 index 0000000000..4cf3fa76b6 --- /dev/null +++ b/meta/lib/oeqa/runtime/__init__.py @@ -0,0 +1,3 @@ +# Enable other layers to have tests in the same named directory +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/runtime/buildcvs.py b/meta/lib/oeqa/runtime/buildcvs.py new file mode 100644 index 0000000000..f1fbf19c1f --- /dev/null +++ b/meta/lib/oeqa/runtime/buildcvs.py @@ -0,0 +1,30 @@ +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * +from oeqa.utils.targetbuild import TargetBuildProject + +def setUpModule(): + if not oeRuntimeTest.hasFeature("tools-sdk"): + skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES") + +class BuildCvsTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d, + "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2") + self.project.download_archive() + + @skipUnlessPassed("test_ssh") + def test_cvs(self): + self.assertEqual(self.project.run_configure(), 0, + msg="Running configure failed") + + self.assertEqual(self.project.run_make(), 0, + msg="Running make failed") + + self.assertEqual(self.project.run_install(), 0, + msg="Running make install failed") + + @classmethod + def tearDownClass(self): + self.project.clean() diff --git a/meta/lib/oeqa/runtime/buildiptables.py b/meta/lib/oeqa/runtime/buildiptables.py new file mode 100644 index 0000000000..f6061a7f98 --- /dev/null +++ b/meta/lib/oeqa/runtime/buildiptables.py @@ -0,0 +1,30
@@ +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * +from oeqa.utils.targetbuild import TargetBuildProject + +def setUpModule(): + if not oeRuntimeTest.hasFeature("tools-sdk"): + skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES") + +class BuildIptablesTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d, + "http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2") + self.project.download_archive() + + @skipUnlessPassed("test_ssh") + def test_iptables(self): + self.assertEqual(self.project.run_configure(), 0, + msg="Running configure failed") + + self.assertEqual(self.project.run_make(), 0, + msg="Running make failed") + + self.assertEqual(self.project.run_install(), 0, + msg="Running make install failed") + + @classmethod + def tearDownClass(self): + self.project.clean() diff --git a/meta/lib/oeqa/runtime/buildsudoku.py b/meta/lib/oeqa/runtime/buildsudoku.py new file mode 100644 index 0000000000..a754f1d9ea --- /dev/null +++ b/meta/lib/oeqa/runtime/buildsudoku.py @@ -0,0 +1,27 @@ +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * +from oeqa.utils.targetbuild import TargetBuildProject + +def setUpModule(): + if not oeRuntimeTest.hasFeature("tools-sdk"): + skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES") + +class SudokuTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d, + "http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2") + self.project.download_archive() + + @skipUnlessPassed("test_ssh") + def test_sudoku(self): + self.assertEqual(self.project.run_configure(), 0, + msg="Running configure failed") + + self.assertEqual(self.project.run_make(), 0, + msg="Running make failed") + + @classmethod + def tearDownClass(self): + self.project.clean() diff --git a/meta/lib/oeqa/runtime/connman.py b/meta/lib/oeqa/runtime/connman.py new file mode 100644 index 0000000000..c03688206f --- /dev/null +++ b/meta/lib/oeqa/runtime/connman.py @@ -0,0 +1,30 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("connman"): + skipModule("No connman package in image") + + +class ConnmanTest(oeRuntimeTest): + + def service_status(self, service): + if oeRuntimeTest.hasFeature("systemd"): + (status, output) = self.target.run('systemctl status -l %s' % service) + return output + else: + return "Unable to get status or logs for %s" % service + + @skipUnlessPassed('test_ssh') + def test_connmand_help(self): + (status, output) = self.target.run('/usr/sbin/connmand --help') + self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output)) + + + @skipUnlessPassed('test_connmand_help') + def test_connmand_running(self): + (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand') + if status != 0: + print self.service_status("connman") + self.fail("No connmand process running") diff --git a/meta/lib/oeqa/runtime/date.py b/meta/lib/oeqa/runtime/date.py new file mode 100644 index 0000000000..a208e29ada --- /dev/null +++ b/meta/lib/oeqa/runtime/date.py @@ -0,0 +1,22 @@ +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * +import re + +class DateTest(oeRuntimeTest): + + @skipUnlessPassed("test_ssh") + def test_date(self): + (status, 
output) = self.target.run('date +"%Y-%m-%d %T"') + self.assertEqual(status, 0, msg="Failed to get initial date, output: %s" % output) + oldDate = output + + sampleDate = '"2016-08-09 10:00:00"' + (status, output) = self.target.run("date -s %s" % sampleDate) + self.assertEqual(status, 0, msg="Date set failed, output: %s" % output) + + (status, output) = self.target.run("date -R") + p = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output) + self.assertTrue(p, msg="The date was not set correctly, output: %s" % output) + + (status, output) = self.target.run('date -s "%s"' % oldDate) + self.assertEqual(status, 0, msg="Failed to reset date, output: %s" % output) diff --git a/meta/lib/oeqa/runtime/df.py b/meta/lib/oeqa/runtime/df.py new file mode 100644 index 0000000000..b6da35027c --- /dev/null +++ b/meta/lib/oeqa/runtime/df.py @@ -0,0 +1,11 @@ +import unittest +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * + + +class DfTest(oeRuntimeTest): + + @skipUnlessPassed("test_ssh") + def test_df(self): + (status,output) = self.target.run("df / | sed -n '2p' | awk '{print $4}'") + self.assertTrue(int(output)>5120, msg="Not enough space on image. Current size is %s" % output) diff --git a/meta/lib/oeqa/runtime/dmesg.py b/meta/lib/oeqa/runtime/dmesg.py new file mode 100644 index 0000000000..64247ea704 --- /dev/null +++ b/meta/lib/oeqa/runtime/dmesg.py @@ -0,0 +1,11 @@ +import unittest +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * + + +class DmesgTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_dmesg(self): + (status, output) = self.target.run('dmesg | grep -v mmci-pl18x | grep -v "error changing net interface name" | grep -iv "dma timeout" | grep -i error') + self.assertEqual(status, 1, msg = "Error messages in dmesg log: %s" % output) diff --git a/meta/lib/oeqa/runtime/files/hellomod.c b/meta/lib/oeqa/runtime/files/hellomod.c new file mode 100644 index 0000000000..a383397e93 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/hellomod.c @@ -0,0 +1,19 @@ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> + +static int __init hello_init(void) +{ + printk(KERN_INFO "Hello world!\n"); + return 0; +} + +static void __exit hello_cleanup(void) +{ + printk(KERN_INFO "Cleaning up hellomod.\n"); +} + +module_init(hello_init); +module_exit(hello_cleanup); + +MODULE_LICENSE("GPL"); diff --git a/meta/lib/oeqa/runtime/files/hellomod_makefile b/meta/lib/oeqa/runtime/files/hellomod_makefile new file mode 100644 index 0000000000..b92d5c8fe0 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/hellomod_makefile @@ -0,0 +1,8 @@ +obj-m := hellomod.o +KDIR := /usr/src/kernel + +all: + $(MAKE) -C $(KDIR) M=$(PWD) modules + +clean: + $(MAKE) -C $(KDIR) M=$(PWD) clean diff --git a/meta/lib/oeqa/runtime/files/test.c b/meta/lib/oeqa/runtime/files/test.c new file mode 100644 index 0000000000..2d8389c92e --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.c @@ -0,0 +1,26 @@ +#include <stdio.h> +#include <math.h> +#include <stdlib.h> + +double convert(long long l) +{ + return (double)l; +} + +int main(int argc, char * argv[]) { + + long long l = 10; + double f; + double check = 10.0; + + f = convert(l); + printf("convert: %lld => %f\n", l, f); + if ( f != check ) exit(1); + + f = 1234.67; + check = 1234.0; + printf("floorf(%f) = %f\n", f, floorf(f)); + if ( floorf(f) != check) exit(1); + + return 0; +} diff --git a/meta/lib/oeqa/runtime/files/test.pl b/meta/lib/oeqa/runtime/files/test.pl new file mode 100644 index 0000000000..689c8f1635 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.pl @@ -0,0 +1,2 @@ +$a =
9.01e+21 - 9.01e+21 + 0.01; +print ("the value of a is ", $a, "\n"); diff --git a/meta/lib/oeqa/runtime/files/test.py b/meta/lib/oeqa/runtime/files/test.py new file mode 100644 index 0000000000..f3a2273c52 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.py @@ -0,0 +1,6 @@ +import os + +os.system('touch /tmp/testfile.python') + +a = 9.01e+21 - 9.01e+21 + 0.01 +print "the value of a is %s" % a diff --git a/meta/lib/oeqa/runtime/files/testmakefile b/meta/lib/oeqa/runtime/files/testmakefile new file mode 100644 index 0000000000..ca1844e930 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/testmakefile @@ -0,0 +1,5 @@ +test: test.o + gcc -o test test.o -lm +test.o: test.c + gcc -c test.c + diff --git a/meta/lib/oeqa/runtime/gcc.py b/meta/lib/oeqa/runtime/gcc.py new file mode 100644 index 0000000000..b63badd3e4 --- /dev/null +++ b/meta/lib/oeqa/runtime/gcc.py @@ -0,0 +1,36 @@ +import unittest +import os +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("tools-sdk"): + skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES") + + +class GccCompileTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.c"), "/tmp/test.c") + oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "testmakefile"), "/tmp/testmakefile") + + def test_gcc_compile(self): + (status, output) = self.target.run('gcc /tmp/test.c -o /tmp/test -lm') + self.assertEqual(status, 0, msg="gcc compile failed, output: %s" % output) + (status, output) = self.target.run('/tmp/test') + self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output) + + def test_gpp_compile(self): + (status, output) = self.target.run('g++ /tmp/test.c -o /tmp/test -lm') + self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output) + (status, output) = self.target.run('/tmp/test') + self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output) + + def test_make(self): + (status, output) = self.target.run('cd /tmp; make -f testmakefile') + self.assertEqual(status, 0, msg="running make failed, output %s" % output) + + @classmethod + def tearDownClass(self): + oeRuntimeTest.tc.target.run("rm /tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile") diff --git a/meta/lib/oeqa/runtime/kernelmodule.py b/meta/lib/oeqa/runtime/kernelmodule.py new file mode 100644 index 0000000000..cbc5742eff --- /dev/null +++ b/meta/lib/oeqa/runtime/kernelmodule.py @@ -0,0 +1,33 @@ +import unittest +import os +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("tools-sdk"): + skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES") + + +class KernelModuleTest(oeRuntimeTest): + + def setUp(self): + self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod.c"), "/tmp/hellomod.c") + self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod_makefile"), "/tmp/Makefile") + + @skipUnlessPassed('test_ssh') + @skipUnlessPassed('test_gcc_compile') + def test_kernel_module(self): + cmds = [ + 'cd /usr/src/kernel && make scripts', + 'cd /tmp && make', + 'cd /tmp && insmod hellomod.ko', + 'lsmod | grep hellomod', + 'dmesg | grep Hello', + 'rmmod hellomod', 'dmesg | grep "Cleaning up hellomod"' + ] + for cmd in cmds: + (status, output) = self.target.run(cmd, 900) + self.assertEqual(status, 0, msg="\n".join([cmd, output])) + + 
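    # (Editorial sketch, not part of the original patch: the run-and-assert
    # loop in test_kernel_module above could be factored into a reusable
    # helper; "run_cmds" is a hypothetical name, not defined elsewhere in
    # this series.)
    def run_cmds(self, cmds, timeout=900):
        for cmd in cmds:
            (status, output) = self.target.run(cmd, timeout)
            self.assertEqual(status, 0, msg="\n".join([cmd, output]))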
def tearDown(self): + self.target.run('rm -f /tmp/Makefile /tmp/hellomod.c') diff --git a/meta/lib/oeqa/runtime/ldd.py b/meta/lib/oeqa/runtime/ldd.py new file mode 100644 index 0000000000..4374530fc4 --- /dev/null +++ b/meta/lib/oeqa/runtime/ldd.py @@ -0,0 +1,19 @@ +import unittest +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("tools-sdk"): + skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES") + +class LddTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_ldd_exists(self): + (status, output) = self.target.run('which ldd') + self.assertEqual(status, 0, msg = "ldd does not exist in PATH: which ldd: %s" % output) + + @skipUnlessPassed('test_ldd_exists') + def test_ldd_rtldlist_check(self): + (status, output) = self.target.run('for i in $(which ldd | xargs cat | grep "^RTLDLIST"|cut -d\'=\' -f2|tr -d \'"\'); do test -f $i && echo $i && break; done') + self.assertEqual(status, 0, msg = "ldd path not correct or RTLDLIST files don't exist. ") diff --git a/meta/lib/oeqa/runtime/logrotate.py b/meta/lib/oeqa/runtime/logrotate.py new file mode 100644 index 0000000000..80489a3267 --- /dev/null +++ b/meta/lib/oeqa/runtime/logrotate.py @@ -0,0 +1,27 @@ +# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase +# Note that the image under test must have logrotate installed + +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("logrotate"): + skipModule("No logrotate package in image") + + +class LogrotateTest(oeRuntimeTest): + + @skipUnlessPassed("test_ssh") + def test_1_logrotate_setup(self): + (status, output) = self.target.run('mkdir /home/root/logrotate_dir') + self.assertEqual(status, 0, msg = "Could not create logrotate_dir. Output: %s" % output) + (status, output) = self.target.run("sed -i 's#wtmp {#wtmp {\\n olddir /home/root/logrotate_dir#' /etc/logrotate.conf") + self.assertEqual(status, 0, msg = "Could not write to logrotate.conf file. Status and output: %s and %s)" % (status, output)) + + @skipUnlessPassed("test_1_logrotate_setup") + def test_2_logrotate(self): + (status, output) = self.target.run('logrotate -f /etc/logrotate.conf') + self.assertEqual(status, 0, msg = "logrotate service could not be reloaded. Status and output: %s and %s" % (status, output)) + output = self.target.run('ls -la /home/root/logrotate_dir/ | wc -l')[1] + self.assertTrue(int(output)>=3, msg = "new logfile could not be created. 
List of files within log directory: %s" %(self.target.run('ls -la /home/root/logrotate_dir')[1])) diff --git a/meta/lib/oeqa/runtime/multilib.py b/meta/lib/oeqa/runtime/multilib.py new file mode 100644 index 0000000000..13a3b54b18 --- /dev/null +++ b/meta/lib/oeqa/runtime/multilib.py @@ -0,0 +1,17 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + multilibs = oeRuntimeTest.tc.d.getVar("MULTILIBS", True) or "" + if "multilib:lib32" not in multilibs: + skipModule("this isn't a multilib:lib32 image") + + +class MultilibTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_file_connman(self): + self.assertTrue(oeRuntimeTest.hasPackage('connman-gnome'), msg="This test assumes connman-gnome is installed") + (status, output) = self.target.run("readelf -h /usr/bin/connman-applet | sed -n '3p' | awk '{print $2}'") + self.assertEqual(output, "ELF32", msg="connman-applet isn't an ELF32 binary. readelf says: %s" % self.target.run("readelf -h /usr/bin/connman-applet")[1]) diff --git a/meta/lib/oeqa/runtime/pam.py b/meta/lib/oeqa/runtime/pam.py new file mode 100644 index 0000000000..52e1eb88e6 --- /dev/null +++ b/meta/lib/oeqa/runtime/pam.py @@ -0,0 +1,24 @@ +# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase +# Note that the image under test must have "pam" in DISTRO_FEATURES + +import unittest +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("pam"): + skipModule("target doesn't have 'pam' in DISTRO_FEATURES") + + +class PamBasicTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_pam(self): + (status, output) = self.target.run('login --help') + self.assertEqual(status, 1, msg = "login command does not work as expected. Status and output:%s and %s" %(status, output)) + (status, output) = self.target.run('passwd --help') + self.assertEqual(status, 6, msg = "passwd command does not work as expected. Status and output:%s and %s" %(status, output)) + (status, output) = self.target.run('su --help') + self.assertEqual(status, 2, msg = "su command does not work as expected. Status and output:%s and %s" %(status, output)) + (status, output) = self.target.run('useradd --help') + self.assertEqual(status, 2, msg = "useradd command does not work as expected. Status and output:%s and %s" %(status, output)) diff --git a/meta/lib/oeqa/runtime/perl.py b/meta/lib/oeqa/runtime/perl.py new file mode 100644 index 0000000000..c9bb684c11 --- /dev/null +++ b/meta/lib/oeqa/runtime/perl.py @@ -0,0 +1,28 @@ +import unittest +import os +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("perl"): + skipModule("No perl package in the image") + + +class PerlTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.pl"), "/tmp/test.pl") + + def test_perl_exists(self): + (status, output) = self.target.run('which perl') + self.assertEqual(status, 0, msg="Perl binary not in PATH or not on target.") + + def test_perl_works(self): + (status, output) = self.target.run('perl /tmp/test.pl') + self.assertEqual(status, 0, msg="Exit status was not 0. 
Output: %s" % output) + self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output) + + @classmethod + def tearDownClass(self): + oeRuntimeTest.tc.target.run("rm /tmp/test.pl") diff --git a/meta/lib/oeqa/runtime/ping.py b/meta/lib/oeqa/runtime/ping.py new file mode 100644 index 0000000000..a73c72402a --- /dev/null +++ b/meta/lib/oeqa/runtime/ping.py @@ -0,0 +1,20 @@ +import subprocess +import unittest +import sys +import time +from oeqa.oetest import oeRuntimeTest + +class PingTest(oeRuntimeTest): + + def test_ping(self): + output = '' + count = 0 + endtime = time.time() + 60 + while count < 5 and time.time() < endtime: + proc = subprocess.Popen("ping -c 1 %s" % self.target.ip, shell=True, stdout=subprocess.PIPE) + output += proc.communicate()[0] + if proc.poll() == 0: + count += 1 + else: + count = 0 + self.assertEqual(count, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (count,output)) diff --git a/meta/lib/oeqa/runtime/python.py b/meta/lib/oeqa/runtime/python.py new file mode 100644 index 0000000000..c037ab2c18 --- /dev/null +++ b/meta/lib/oeqa/runtime/python.py @@ -0,0 +1,33 @@ +import unittest +import os +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("python"): + skipModule("No python package in the image") + + +class PythonTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.py"), "/tmp/test.py") + + def test_python_exists(self): + (status, output) = self.target.run('which python') + self.assertEqual(status, 0, msg="Python binary not in PATH or not on target.") + + def test_python_stdout(self): + (status, output) = self.target.run('python /tmp/test.py') + self.assertEqual(status, 0, msg="Exit status was not 0. 
Output: %s" % output) + self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output) + + def test_python_testfile(self): + (status, output) = self.target.run('ls /tmp/testfile.python') + self.assertEqual(status, 0, msg="Python test file generate failed.") + + + @classmethod + def tearDownClass(self): + oeRuntimeTest.tc.target.run("rm /tmp/test.py /tmp/testfile.python") diff --git a/meta/lib/oeqa/runtime/rpm.py b/meta/lib/oeqa/runtime/rpm.py new file mode 100644 index 0000000000..084d22f96b --- /dev/null +++ b/meta/lib/oeqa/runtime/rpm.py @@ -0,0 +1,50 @@ +import unittest +import os +import fnmatch +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("package-management"): + skipModule("rpm module skipped: target doesn't have package-management in IMAGE_FEATURES") + if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]: + skipModule("rpm module skipped: target doesn't have rpm as primary package manager") + + +class RpmBasicTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_rpm_help(self): + (status, output) = self.target.run('rpm --help') + self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output)) + + @skipUnlessPassed('test_rpm_help') + def test_rpm_query(self): + (status, output) = self.target.run('rpm -q rpm') + self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output)) + +class RpmInstallRemoveTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + pkgarch = oeRuntimeTest.tc.d.getVar('TUNE_PKGARCH', True).replace("-", "_") + rpmdir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), "rpm", pkgarch) + # pick rpm-doc as a test file to get installed, because it's small and it will always be built for standard targets + for f in fnmatch.filter(os.listdir(rpmdir), "rpm-doc-*.%s.rpm" % pkgarch): + testrpmfile = f + oeRuntimeTest.tc.target.copy_to(os.path.join(rpmdir,testrpmfile), "/tmp/rpm-doc.rpm") + + @skipUnlessPassed('test_rpm_help') + def test_rpm_install(self): + (status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm') + self.assertEqual(status, 0, msg="Failed to install rpm-doc package: %s" % output) + + @skipUnlessPassed('test_rpm_install') + def test_rpm_remove(self): + (status,output) = self.target.run('rpm -e rpm-doc') + self.assertEqual(status, 0, msg="Failed to remove rpm-doc package: %s" % output) + + @classmethod + def tearDownClass(self): + oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm') + diff --git a/meta/lib/oeqa/runtime/scanelf.py b/meta/lib/oeqa/runtime/scanelf.py new file mode 100644 index 0000000000..b9abf24640 --- /dev/null +++ b/meta/lib/oeqa/runtime/scanelf.py @@ -0,0 +1,26 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("pax-utils"): + skipModule("pax-utils package not installed") + +class ScanelfTest(oeRuntimeTest): + + def setUp(self): + self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path' + + @skipUnlessPassed('test_ssh') + def test_scanelf_textrel(self): + # print TEXTREL information + self.scancmd += " --textrel" + (status, output) = self.target.run(self.scancmd) + self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output])) + + @skipUnlessPassed('test_ssh') + def test_scanelf_rpath(self): + # print RPATH information + self.scancmd += " --rpath" + (status, output) = 
self.target.run(self.scancmd) + self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output])) diff --git a/meta/lib/oeqa/runtime/scp.py b/meta/lib/oeqa/runtime/scp.py new file mode 100644 index 0000000000..03095bf966 --- /dev/null +++ b/meta/lib/oeqa/runtime/scp.py @@ -0,0 +1,21 @@ +import os +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import skipUnlessPassed + +def setUpModule(): + if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh-sshd")): + skipModule("No ssh package in image") + +class ScpTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_scp_file(self): + test_log_dir = oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True) + test_file_path = os.path.join(test_log_dir, 'test_scp_file') + with open(test_file_path, 'w') as test_scp_file: + test_scp_file.seek(2 ** 22 - 1) + test_scp_file.write(os.linesep) + (status, output) = self.target.copy_to(test_file_path, '/tmp/test_scp_file') + self.assertEqual(status, 0, msg = "File could not be copied. Output: %s" % output) + (status, output) = self.target.run("ls -la /tmp/test_scp_file") + self.assertEqual(status, 0, msg = "SCP test failed") diff --git a/meta/lib/oeqa/runtime/skeletoninit.py b/meta/lib/oeqa/runtime/skeletoninit.py new file mode 100644 index 0000000000..557e715a3e --- /dev/null +++ b/meta/lib/oeqa/runtime/skeletoninit.py @@ -0,0 +1,28 @@ +# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284 testcase +# Note that the image under test must have meta-skeleton layer in bblayers and IMAGE_INSTALL_append = " service" in local.conf + +import unittest +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("service"): + skipModule("No service package in image") + + +class SkeletonBasicTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropriate for systemd image") + def test_skeleton_availability(self): + (status, output) = self.target.run('ls /etc/init.d/skeleton') + self.assertEqual(status, 0, msg = "skeleton init script not found. Output:\n%s " % output) + (status, output) = self.target.run('ls /usr/sbin/skeleton-test') + self.assertEqual(status, 0, msg = "skeleton-test not found.
Output:\n%s" % output) + + @skipUnlessPassed('test_skeleton_availability') + @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropriate for systemd image") + def test_skeleton_script(self): + output1 = self.target.run("/etc/init.d/skeleton start")[1] + (status, output2) = self.target.run(oeRuntimeTest.pscmd + ' | grep [s]keleton-test') + self.assertEqual(status, 0, msg = "Skeleton script could not be started:\n%s\n%s" % (output1, output2)) diff --git a/meta/lib/oeqa/runtime/smart.py b/meta/lib/oeqa/runtime/smart.py new file mode 100644 index 0000000000..195f1170c6 --- /dev/null +++ b/meta/lib/oeqa/runtime/smart.py @@ -0,0 +1,110 @@ +import unittest +import os, re +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * +from oeqa.utils.httpserver import HTTPService + +def setUpModule(): + if not oeRuntimeTest.hasFeature("package-management"): + skipModule("Image doesn't have package management feature") + if not oeRuntimeTest.hasPackage("smart"): + skipModule("Image doesn't have smart installed") + if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]: + skipModule("Rpm is not the primary package manager") + +class SmartTest(oeRuntimeTest): + + @skipUnlessPassed('test_smart_help') + def smart(self, command, expected = 0): + command = 'smart %s' % command + status, output = self.target.run(command, 1500) + message = os.linesep.join([command, output]) + self.assertEqual(status, expected, message) + self.assertFalse("Cannot allocate memory" in output, message) + return output + +class SmartBasicTest(SmartTest): + + @skipUnlessPassed('test_ssh') + def test_smart_help(self): + self.smart('--help') + + def test_smart_version(self): + self.smart('--version') + + def test_smart_info(self): + self.smart('info python-smartpm') + + def test_smart_query(self): + self.smart('query python-smartpm') + + def test_smart_search(self): + self.smart('search python-smartpm') + + def test_smart_stats(self): + self.smart('stats') + +class SmartRepoTest(SmartTest): + + @classmethod + def setUpClass(self): + self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip) + self.repo_server.start() + + @classmethod + def tearDownClass(self): + self.repo_server.stop() + + def test_smart_channel(self): + self.smart('channel', 1) + + def test_smart_channel_add(self): + image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True) + deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype) + pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split() + for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)): + if arch in pkgarchs: + self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url)) + self.smart('update') + + def test_smart_channel_help(self): + self.smart('channel --help') + + def test_smart_channel_list(self): + self.smart('channel --list') + + def test_smart_channel_show(self): + self.smart('channel --show') + + def test_smart_channel_rpmsys(self): + self.smart('channel --show rpmsys') + self.smart('channel --disable rpmsys') + self.smart('channel --enable rpmsys') + + @skipUnlessPassed('test_smart_channel_add') + def test_smart_install(self): + self.smart('remove -y psplash-default') + self.smart('install -y psplash-default') + + @skipUnlessPassed('test_smart_install') + def test_smart_install_dependency(self): + self.smart('remove -y psplash') + self.smart('install -y
psplash-default') + + @skipUnlessPassed('test_smart_channel_add') + def test_smart_install_from_disk(self): + self.smart('remove -y psplash-default') + self.smart('download psplash-default') + self.smart('install -y ./psplash-default*') + + @skipUnlessPassed('test_smart_channel_add') + def test_smart_install_from_http(self): + output = self.smart('download --urls psplash-default') + url = re.search('(http://.*/psplash-default.*\.rpm)', output) + self.assertTrue(url, msg="Couldn't find download URL in %s" % output) + self.smart('remove -y psplash-default') + self.smart('install -y %s' % url.group(0)) + + @skipUnlessPassed('test_smart_install') + def test_smart_reinstall(self): + self.smart('reinstall -y psplash-default') diff --git a/meta/lib/oeqa/runtime/ssh.py b/meta/lib/oeqa/runtime/ssh.py new file mode 100644 index 0000000000..e64866019f --- /dev/null +++ b/meta/lib/oeqa/runtime/ssh.py @@ -0,0 +1,18 @@ +import subprocess +import unittest +import sys +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh")): + skipModule("No ssh package in image") + +class SshTest(oeRuntimeTest): + + @skipUnlessPassed('test_ping') + def test_ssh(self): + (status, output) = self.target.run('uname -a') + self.assertEqual(status, 0, msg="SSH Test failed: %s" % output) + (status, output) = self.target.run('cat /etc/masterimage') + self.assertEqual(status, 1, msg="This isn't the right image - /etc/masterimage shouldn't be here: %s" % output) diff --git a/meta/lib/oeqa/runtime/syslog.py b/meta/lib/oeqa/runtime/syslog.py new file mode 100644 index 0000000000..b95b36175a --- /dev/null +++ b/meta/lib/oeqa/runtime/syslog.py @@ -0,0 +1,46 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("syslog"): + skipModule("No syslog package in image") + +class SyslogTest(oeRuntimeTest): + + @skipUnlessPassed("test_ssh") + def test_syslog_help(self): + (status,output) = self.target.run('/sbin/syslogd --help') + self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output)) + + @skipUnlessPassed("test_syslog_help") + def test_syslog_running(self): + (status,output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -i [s]yslogd') + self.assertEqual(status, 0, msg="no syslogd process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1]) + + +class SyslogTestConfig(oeRuntimeTest): + + @skipUnlessPassed("test_syslog_running") + def test_syslog_logger(self): + (status,output) = self.target.run('logger foobar && test -e /var/log/messages && grep foobar /var/log/messages || logread | grep foobar') + self.assertEqual(status, 0, msg="Test log string not found in /var/log/messages.
Output: %s " % output) + + @skipUnlessPassed("test_syslog_running") + def test_syslog_restart(self): + if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"): + (status,output) = self.target.run('/etc/init.d/syslog restart') + else: + (status,output) = self.target.run('systemctl restart syslog.service') + + @skipUnlessPassed("test_syslog_restart") + @skipUnlessPassed("test_syslog_logger") + @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropriate for systemd image") + def test_syslog_startup_config(self): + self.target.run('echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf') + (status,output) = self.target.run('/etc/init.d/syslog restart') + self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output)) + (status,output) = self.target.run('logger foobar && grep foobar /var/log/test') + self.assertEqual(status, 0, msg="Test log string not found. Output: %s " % output) + self.target.run("sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf") + self.target.run('/etc/init.d/syslog restart') diff --git a/meta/lib/oeqa/runtime/systemd.py b/meta/lib/oeqa/runtime/systemd.py new file mode 100644 index 0000000000..6de84f891b --- /dev/null +++ b/meta/lib/oeqa/runtime/systemd.py @@ -0,0 +1,84 @@ +import unittest +import re +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("systemd"): + skipModule("target doesn't have systemd in DISTRO_FEATURES") + if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True): + skipModule("systemd is not the init manager for this image") + + +class SystemdTest(oeRuntimeTest): + + def systemctl(self, action = '', target = '', expected = 0, verbose = False): + command = 'systemctl %s %s' % (action, target) + status, output = self.target.run(command) + message = '\n'.join([command, output]) + if status != expected and verbose: + message += self.target.run('systemctl status --full %s' % target)[1] + self.assertEqual(status, expected, message) + return output + + +class SystemdBasicTests(SystemdTest): + + @skipUnlessPassed('test_ssh') + def test_systemd_basic(self): + self.systemctl('--version') + + @skipUnlessPassed('test_systemd_basic') + def test_systemd_list(self): + self.systemctl('list-unit-files') + + def settle(self): + """ + Block until systemd has finished activating any units being activated, + or until two minutes have elapsed. + + Returns a tuple, either (True, '') if all units have finished + activating, or (False, message string) if there are still units + activating (generally, failing units that restart).
+ """ + import time + endtime = time.time() + (60 * 2) + while True: + status, output = self.target.run('systemctl --state=activating') + if "0 loaded units listed" in output: + return (True, '') + if time.time() >= endtime: + return (False, output) + time.sleep(10) + + @skipUnlessPassed('test_systemd_basic') + def test_systemd_failed(self): + settled, output = self.settle() + self.assertTrue(settled, msg="Timed out waiting for systemd to settle:\n" + output) + + output = self.systemctl('list-units', '--failed') + match = re.search("0 loaded units listed", output) + if not match: + output += self.systemctl('status --full --failed') + self.assertTrue(match, msg="Some systemd units failed:\n%s" % output) + + +class SystemdServiceTests(SystemdTest): + + @skipUnlessPassed('test_systemd_basic') + def test_systemd_status(self): + self.systemctl('status --full', 'avahi-daemon.service') + + @skipUnlessPassed('test_systemd_status') + def test_systemd_stop_start(self): + self.systemctl('stop', 'avahi-daemon.service') + self.systemctl('is-active', 'avahi-daemon.service', expected=3, verbose=True) + self.systemctl('start','avahi-daemon.service') + self.systemctl('is-active', 'avahi-daemon.service', verbose=True) + + @skipUnlessPassed('test_systemd_basic') + def test_systemd_disable_enable(self): + self.systemctl('disable', 'avahi-daemon.service') + self.systemctl('is-enabled', 'avahi-daemon.service', expected=1) + self.systemctl('enable', 'avahi-daemon.service') + self.systemctl('is-enabled', 'avahi-daemon.service') diff --git a/meta/lib/oeqa/runtime/vnc.py b/meta/lib/oeqa/runtime/vnc.py new file mode 100644 index 0000000000..5ed10727bc --- /dev/null +++ b/meta/lib/oeqa/runtime/vnc.py @@ -0,0 +1,19 @@ +from oeqa.oetest import oeRuntimeTest +from oeqa.utils.decorators import * +import re + +def setUpModule(): + skipModuleUnless(oeRuntimeTest.hasPackage('x11vnc'), "No x11vnc package in image") + +class VNCTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_vnc(self): + (status, output) = self.target.run('x11vnc -display :0 -bg -o x11vnc.log') + self.assertEqual(status, 0, msg="x11vnc server failed to start: %s" % output) + port = re.search('PORT=[0-9]*', output) + self.assertTrue(port, msg="Listening port not specified in command output: %s" %output) + + vncport = port.group(0).split('=')[1] + (status, output) = self.target.run('netstat -ntl | grep ":%s"' % vncport) + self.assertEqual(status, 0, msg="x11vnc server not running on port %s\n\n%s" % (vncport, self.target.run('netstat -ntl; cat x11vnc.log')[1])) diff --git a/meta/lib/oeqa/runtime/x32lib.py b/meta/lib/oeqa/runtime/x32lib.py new file mode 100644 index 0000000000..6bad201b12 --- /dev/null +++ b/meta/lib/oeqa/runtime/x32lib.py @@ -0,0 +1,17 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + #check if DEFAULTTUNE is set and it's value is: x86-64-x32 + defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True) + if "x86-64-x32" not in defaulttune: + skipModule("DEFAULTTUNE is not set to x86-64-x32") + +class X32libTest(oeRuntimeTest): + + @skipUnlessPassed("test_ssh") + def test_x32_file(self): + status1 = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0] + status2 = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0] + self.assertTrue(status1 == 0 and status2 == 0, msg="/bin/ls isn't an X86-64 ELF32 binary. 
readelf says: %s" % self.target.run("readelf -h /bin/ls")[1]) diff --git a/meta/lib/oeqa/runtime/xorg.py b/meta/lib/oeqa/runtime/xorg.py new file mode 100644 index 0000000000..12dccd8198 --- /dev/null +++ b/meta/lib/oeqa/runtime/xorg.py @@ -0,0 +1,21 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("x11-base"): + skipModule("target doesn't have x11 in IMAGE_FEATURES") + + +class XorgTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_xorg_running(self): + (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -v xinit | grep [X]org') + self.assertEqual(status, 0, msg="Xorg does not appear to be running %s" % self.target.run(oeRuntimeTest.pscmd)[1]) + + @skipUnlessPassed('test_ssh') + def test_xorg_error(self): + (status, output) = self.target.run('cat /var/log/Xorg.0.log | grep -v "(EE) error," | grep -v "PreInit" | grep -v "evdev:" | grep -v "glx" | grep "(EE)"') + self.assertEqual(status, 1, msg="Errors in Xorg log: %s" % output) + diff --git a/meta/lib/oeqa/selftest/__init__.py b/meta/lib/oeqa/selftest/__init__.py new file mode 100644 index 0000000000..3ad9513f40 --- /dev/null +++ b/meta/lib/oeqa/selftest/__init__.py @@ -0,0 +1,2 @@ +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/selftest/_sstatetests_noauto.py b/meta/lib/oeqa/selftest/_sstatetests_noauto.py new file mode 100644 index 0000000000..fc9ae7efb9 --- /dev/null +++ b/meta/lib/oeqa/selftest/_sstatetests_noauto.py @@ -0,0 +1,95 @@ +import datetime +import unittest +import os +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer +from oeqa.selftest.sstate import SStateBase + + +class RebuildFromSState(SStateBase): + + @classmethod + def setUpClass(self): + self.builddir = os.path.join(os.environ.get('BUILDDIR')) + + def get_dep_targets(self, primary_targets): + found_targets = [] + bitbake("-g " + ' '.join(map(str, primary_targets))) + with open(os.path.join(self.builddir, 'pn-buildlist'), 'r') as pnfile: + found_targets = pnfile.read().splitlines() + return found_targets + + def configure_builddir(self, builddir): + os.mkdir(builddir) + self.track_for_cleanup(builddir) + os.mkdir(os.path.join(builddir, 'conf')) + shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/local.conf'), os.path.join(builddir, 'conf/local.conf')) + config = {} + config['default_sstate_dir'] = "SSTATE_DIR ?= \"${TOPDIR}/sstate-cache\"" + config['null_sstate_mirrors'] = "SSTATE_MIRRORS = \"\"" + config['default_tmp_dir'] = "TMPDIR = \"${TOPDIR}/tmp\"" + for key in config: + ftools.append_file(os.path.join(builddir, 'conf/selftest.inc'), config[key]) + shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/bblayers.conf'), os.path.join(builddir, 'conf/bblayers.conf')) + try: + shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/auto.conf'), os.path.join(builddir, 'conf/auto.conf')) + except: + pass + + def hardlink_tree(self, src, dst): + os.mkdir(dst) + self.track_for_cleanup(dst) + for root, dirs, files in os.walk(src): + if root == src: + continue + os.mkdir(os.path.join(dst, root.split(src)[1][1:])) + for sstate_file in files: + os.link(os.path.join(root, sstate_file), os.path.join(dst, root.split(src)[1][1:], sstate_file)) + + def run_test_sstate_rebuild(self, primary_targets, relocate=False, 
rebuild_dependencies=False): + buildA = os.path.join(self.builddir, 'buildA') + if relocate: + buildB = os.path.join(self.builddir, 'buildB') + else: + buildB = buildA + + if rebuild_dependencies: + rebuild_targets = self.get_dep_targets(primary_targets) + else: + rebuild_targets = primary_targets + + self.configure_builddir(buildA) + runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildA)) + 'bitbake ' + ' '.join(map(str, primary_targets)), shell=True, executable='/bin/bash') + self.hardlink_tree(os.path.join(buildA, 'sstate-cache'), os.path.join(self.builddir, 'sstate-cache-buildA')) + shutil.rmtree(buildA) + + failed_rebuild = [] + failed_cleansstate = [] + for target in rebuild_targets: + self.configure_builddir(buildB) + self.hardlink_tree(os.path.join(self.builddir, 'sstate-cache-buildA'), os.path.join(buildB, 'sstate-cache')) + + result_cleansstate = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake -ccleansstate ' + target, ignore_status=True, shell=True, executable='/bin/bash') + if result_cleansstate.status != 0: + failed_cleansstate.append(target) + shutil.rmtree(buildB) + continue + + result_build = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake ' + target, ignore_status=True, shell=True, executable='/bin/bash') + if result_build.status != 0: + failed_rebuild.append(target) + + shutil.rmtree(buildB) + + self.assertFalse(failed_rebuild, msg="The following recipes have failed to rebuild: %s" % ' '.join(map(str, failed_rebuild))) + self.assertFalse(failed_cleansstate, msg="The following recipes have failed cleansstate (all others have passed both cleansstate and rebuild from sstate tests): %s" % ' '.join(map(str, failed_cleansstate))) + + def test_sstate_relocation(self): + self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=True, rebuild_dependencies=True) + + def test_sstate_rebuild(self): + self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=False, rebuild_dependencies=True) diff --git a/meta/lib/oeqa/selftest/base.py b/meta/lib/oeqa/selftest/base.py new file mode 100644 index 0000000000..fc880e9d26 --- /dev/null +++ b/meta/lib/oeqa/selftest/base.py @@ -0,0 +1,129 @@ +# Copyright (c) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + + +# DESCRIPTION +# Base class inherited by test classes in meta/lib/oeqa/selftest + +import unittest +import os +import sys +import shutil +import logging +import errno + +import oeqa.utils.ftools as ftools +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer + +class oeSelfTest(unittest.TestCase): + + log = logging.getLogger("selftest.base") + longMessage = True + + def __init__(self, methodName="runTest"): + self.builddir = os.environ.get("BUILDDIR") + self.localconf_path = os.path.join(self.builddir, "conf/local.conf") + self.testinc_path = os.path.join(self.builddir, "conf/selftest.inc") + self.testlayer_path = oeSelfTest.testlayer_path + self._extra_tear_down_commands = [] + self._track_for_cleanup = [] + super(oeSelfTest, self).__init__(methodName) + + def setUp(self): + os.chdir(self.builddir) + # we don't know what the previous test left around in config or inc files + # if it failed so we need a fresh start + try: + os.remove(self.testinc_path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + for root, _, files in os.walk(self.testlayer_path): + for f in files: + if f == 'test_recipe.inc': + os.remove(os.path.join(root, f)) + # tests might need their own 
setup + # but if they override this one they have to call + # super each time, so let's give them an alternative + self.setUpLocal() + + def setUpLocal(self): + pass + + def tearDown(self): + if self._extra_tear_down_commands: + failed_extra_commands = [] + for command in self._extra_tear_down_commands: + result = runCmd(command, ignore_status=True) + if result.status != 0: + failed_extra_commands.append(command) + if failed_extra_commands: + self.log.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands))) + self.log.debug("Trying to move on.") + self._extra_tear_down_commands = [] + + if self._track_for_cleanup: + for path in self._track_for_cleanup: + if os.path.isdir(path): + shutil.rmtree(path) + if os.path.isfile(path): + os.remove(path) + self._track_for_cleanup = [] + + self.tearDownLocal() + + def tearDownLocal(self): + pass + + # add test specific commands to the tearDown method. + def add_command_to_tearDown(self, command): + self.log.debug("Adding command '%s' to tearDown for this test." % command) + self._extra_tear_down_commands.append(command) + # add test specific files or directories to be removed in the tearDown method + def track_for_cleanup(self, path): + self.log.debug("Adding path '%s' to be cleaned up when test is over" % path) + self._track_for_cleanup.append(path) + + # write to <builddir>/conf/selftest.inc + def write_config(self, data): + self.log.debug("Writing to: %s\n%s\n" % (self.testinc_path, data)) + ftools.write_file(self.testinc_path, data) + + # append to <builddir>/conf/selftest.inc + def append_config(self, data): + self.log.debug("Appending to: %s\n%s\n" % (self.testinc_path, data)) + ftools.append_file(self.testinc_path, data) + + # remove data from <builddir>/conf/selftest.inc + def remove_config(self, data): + self.log.debug("Removing from: %s\n%s\n" % (self.testinc_path, data)) + ftools.remove_from_file(self.testinc_path, data) + + # write to meta-selftest/recipes-test/<recipe>/test_recipe.inc + def write_recipeinc(self, recipe, data): + inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc') + self.log.debug("Writing to: %s\n%s\n" % (inc_file, data)) + ftools.write_file(inc_file, data) + + # append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc + def append_recipeinc(self, recipe, data): + inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc') + self.log.debug("Appending to: %s\n%s\n" % (inc_file, data)) + ftools.append_file(inc_file, data) + + # remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc + def remove_recipeinc(self, recipe, data): + inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc') + self.log.debug("Removing from: %s\n%s\n" % (inc_file, data)) + ftools.remove_from_file(inc_file, data) + + # delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file + def delete_recipeinc(self, recipe): + inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc') + self.log.debug("Deleting file: %s" % inc_file) + try: + os.remove(inc_file) + except OSError as e: + if e.errno != errno.ENOENT: + raise diff --git a/meta/lib/oeqa/selftest/bblayers.py b/meta/lib/oeqa/selftest/bblayers.py new file mode 100644 index 0000000000..52aa4f8112 --- /dev/null +++ b/meta/lib/oeqa/selftest/bblayers.py @@ -0,0 +1,37 @@ +import unittest +import os +import logging +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd + +class 
BitbakeLayers(oeSelfTest): + + def test_bitbakelayers_showcrossdepends(self): + result = runCmd('bitbake-layers show-cross-depends') + self.assertTrue('aspell' in result.output) + + def test_bitbakelayers_showlayers(self): + result = runCmd('bitbake-layers show_layers') + self.assertTrue('meta-selftest' in result.output) + + def test_bitbakelayers_showappends(self): + result = runCmd('bitbake-layers show_appends') + self.assertTrue('xcursor-transparent-theme_0.1.1.bbappend' in result.output, msg='xcursor-transparent-theme_0.1.1.bbappend file was not recognised') + + def test_bitbakelayers_showoverlayed(self): + result = runCmd('bitbake-layers show_overlayed') + self.assertTrue('aspell' in result.output, msg='aspell overlayed recipe was not recognised') + + def test_bitbakelayers_flatten(self): + self.assertFalse(os.path.isdir(os.path.join(self.builddir, 'test'))) + result = runCmd('bitbake-layers flatten test') + bb_file = os.path.join(self.builddir, 'test/recipes-graphics/xcursor-transparent-theme/xcursor-transparent-theme_0.1.1.bb') + self.assertTrue(os.path.isfile(bb_file)) + contents = ftools.read_file(bb_file) + find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents) + shutil.rmtree(os.path.join(self.builddir, 'test')) + self.assertTrue(find_in_contents) diff --git a/meta/lib/oeqa/selftest/bbtests.py b/meta/lib/oeqa/selftest/bbtests.py new file mode 100644 index 0000000000..6815ecfe0b --- /dev/null +++ b/meta/lib/oeqa/selftest/bbtests.py @@ -0,0 +1,104 @@ +import unittest +import os +import logging +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var + +class BitbakeTests(oeSelfTest): + + def test_run_bitbake_from_dir_1(self): + os.chdir(os.path.join(self.builddir, 'conf')) + bitbake('-e') + + def test_run_bitbake_from_dir_2(self): + my_env = os.environ.copy() + my_env['BBPATH'] = my_env['BUILDDIR'] + os.chdir(os.path.dirname(os.environ['BUILDDIR'])) + bitbake('-e', env=my_env) + + def test_event_handler(self): + self.write_config("INHERIT += \"test_events\"") + result = bitbake('m4-native') + find_build_started = re.search("NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Preparing runqueue", result.output) + find_build_completed = re.search("Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output) + self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output) + self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output) + self.assertFalse('Test for bb.event.InvalidEvent' in result.output) + + def test_local_sstate(self): + bitbake('m4-native -ccleansstate') + bitbake('m4-native') + bitbake('m4-native -cclean') + result = bitbake('m4-native') + find_setscene = re.search("m4-native.*do_.*_setscene", result.output) + self.assertTrue(find_setscene) + + def test_bitbake_invalid_recipe(self): + result = bitbake('-b asdf', ignore_status=True) + self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output) + + def test_bitbake_invalid_target(self): + result = bitbake('asdf', ignore_status=True) + self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output) + + def test_warnings_errors(self): + result = bitbake('-b asdf', ignore_status=True) + find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output) + find_errors = re.search("Summary: There w.{2,3}? 
[1-9][0-9]* ERROR messages* shown", result.output) + self.assertTrue(find_warnings, msg="Did not find the number of warnings at the end of the build:\n" + result.output) + self.assertTrue(find_errors, msg="Did not find the number of errors at the end of the build:\n" + result.output) + + def test_invalid_patch(self): + self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"') + result = bitbake('man -c patch', ignore_status=True) + self.delete_recipeinc('man') + bitbake('-cclean man') + self.assertTrue("ERROR: Function failed: patch_do_patch" in result.output) + + def test_force_task(self): + bitbake('m4-native') + result = bitbake('-C compile m4-native') + look_for_tasks = ['do_compile', 'do_install', 'do_populate_sysroot'] + for task in look_for_tasks: + find_task = re.search("m4-native.*%s" % task, result.output) + self.assertTrue(find_task) + + def test_bitbake_g(self): + result = bitbake('-g core-image-full-cmdline') + self.assertTrue('NOTE: PN build list saved to \'pn-buildlist\'' in result.output) + self.assertTrue('openssh' in ftools.read_file(os.path.join(self.builddir, 'pn-buildlist'))) + for f in ['pn-buildlist', 'pn-depends.dot', 'package-depends.dot', 'task-depends.dot']: + os.remove(f) + + def test_image_manifest(self): + bitbake('core-image-minimal') + deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal") + imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal") + manifest = os.path.join(deploydir, imagename + ".manifest") + self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image") + + def test_invalid_recipe_src_uri(self): + data = 'SRC_URI = "file://invalid"' + self.write_recipeinc('man', data) + bitbake('-ccleanall man') + result = bitbake('-c fetch man', ignore_status=True) + bitbake('-ccleanall man') + self.delete_recipeinc('man') + self.assertEqual(result.status, 1, msg='Command succeeded when it should have failed') + self.assertTrue('ERROR: Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output) + self.assertTrue('ERROR: Function failed: Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.' 
in result.output) + + def test_rename_downloaded_file(self): + data = 'SRC_URI_append = ";downloadfilename=test-aspell.tar.gz"' + self.write_recipeinc('aspell', data) + bitbake('-ccleanall aspell') + result = bitbake('-c fetch aspell', ignore_status=True) + self.delete_recipeinc('aspell') + self.assertEqual(result.status, 0) + self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz'))) + self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz.done'))) + bitbake('-ccleanall aspell') diff --git a/meta/lib/oeqa/selftest/buildhistory.py b/meta/lib/oeqa/selftest/buildhistory.py new file mode 100644 index 0000000000..d8cae4664b --- /dev/null +++ b/meta/lib/oeqa/selftest/buildhistory.py @@ -0,0 +1,45 @@ +import unittest +import os +import re +import shutil +import datetime + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer + + +class BuildhistoryBase(oeSelfTest): + + def config_buildhistory(self, tmp_bh_location=False): + if (not 'buildhistory' in get_bb_var('USER_CLASSES')) and (not 'buildhistory' in get_bb_var('INHERIT')): + add_buildhistory_config = 'INHERIT += "buildhistory"\nBUILDHISTORY_COMMIT = "1"' + self.append_config(add_buildhistory_config) + + if tmp_bh_location: + # Using a temporary buildhistory location for testing + tmp_bh_dir = os.path.join(self.builddir, "tmp_buildhistory_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S')) + buildhistory_dir_config = "BUILDHISTORY_DIR = \"%s\"" % tmp_bh_dir + self.append_config(buildhistory_dir_config) + self.track_for_cleanup(tmp_bh_dir) + + def run_buildhistory_operation(self, target, global_config='', target_config='', change_bh_location=False, expect_error=False, error_regex=''): + if change_bh_location: + tmp_bh_location = True + else: + tmp_bh_location = False + self.config_buildhistory(tmp_bh_location) + + self.append_config(global_config) + self.append_recipeinc(target, target_config) + bitbake("-cclean %s" % target) + result = bitbake(target, ignore_status=True) + self.remove_config(global_config) + self.remove_recipeinc(target, target_config) + + if expect_error: + self.assertEqual(result.status, 1, msg="Error expected for global config '%s' and target config '%s'" % (global_config, target_config)) + search_for_error = re.search(error_regex, result.output) + self.assertTrue(search_for_error, msg="Could not find desired error in output: %s" % error_regex) + else: + self.assertEqual(result.status, 0, msg="Command 'bitbake %s' has failed unexpectedly: %s" % (target, result.output)) diff --git a/meta/lib/oeqa/selftest/buildoptions.py b/meta/lib/oeqa/selftest/buildoptions.py new file mode 100644 index 0000000000..8ff40baddc --- /dev/null +++ b/meta/lib/oeqa/selftest/buildoptions.py @@ -0,0 +1,113 @@ +import unittest +import os +import logging +import re + +from oeqa.selftest.base import oeSelfTest +from oeqa.selftest.buildhistory import BuildhistoryBase +from oeqa.utils.commands import runCmd, bitbake, get_bb_var +import oeqa.utils.ftools as ftools + +class ImageOptionsTests(oeSelfTest): + + def test_incremental_image_generation(self): + bitbake("-c cleanall core-image-minimal") + self.write_config('INC_RPM_IMAGE_GEN = "1"') + self.append_config('IMAGE_FEATURES += "ssh-server-openssh"') + bitbake("core-image-minimal") + res = runCmd("grep 'Installing openssh-sshd' %s" % (os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")), 
ignore_status=True) + self.remove_config('IMAGE_FEATURES += "ssh-server-openssh"') + self.assertEqual(0, res.status, msg="No match for openssh-sshd in log.do_rootfs") + bitbake("core-image-minimal") + res = runCmd("grep 'Removing openssh-sshd' %s" % (os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")), ignore_status=True) + self.assertEqual(0, res.status, msg="openssh-sshd was not removed from image") + + def test_rm_old_image(self): + bitbake("core-image-minimal") + deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal") + imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal") + deploydir_files = os.listdir(deploydir) + track_original_files = [] + for image_file in deploydir_files: + if imagename in image_file and os.path.islink(os.path.join(deploydir, image_file)): + track_original_files.append(os.path.realpath(os.path.join(deploydir, image_file))) + self.append_config("RM_OLD_IMAGE = \"1\"") + bitbake("-C rootfs core-image-minimal") + deploydir_files = os.listdir(deploydir) + remaining_not_expected = [path for path in track_original_files if os.path.basename(path) in deploydir_files] + self.assertFalse(remaining_not_expected, msg="\nThe following image files were not removed: %s" % ', '.join(map(str, remaining_not_expected))) + + def test_ccache_tool(self): + bitbake("ccache-native") + self.assertTrue(os.path.isfile(os.path.join(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'), "ccache"))) + self.write_config('INHERIT += "ccache"') + bitbake("m4 -c cleansstate") + bitbake("m4 -c compile") + res = runCmd("grep ccache %s" % (os.path.join(get_bb_var("WORKDIR", "m4"), "temp/log.do_compile")), ignore_status=True) + self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile") + bitbake("ccache-native -ccleansstate") + + +class DiskMonTest(oeSelfTest): + + def test_stoptask_behavior(self): + result = runCmd("df -Pk %s" % os.getcwd()) + size = result.output.split("\n")[1].split()[3] + self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},%sK,4510K"' % size) + res = bitbake("m4", ignore_status = True) + self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output) + self.assertEqual(res.status, 1) + self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},%sK,4510K"' % size) + res = bitbake("m4", ignore_status = True) + self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output) + self.assertEqual(res.status, 1) + self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},%sK,4510K"' % size) + res = bitbake("m4") + self.assertTrue('WARNING: The free space' in res.output) + +class SanityOptionsTest(oeSelfTest): + + def test_options_warnqa_errorqa_switch(self): + bitbake("xcursor-transparent-theme -ccleansstate") + + if "packages-list" not in get_bb_var("ERROR_QA"): + self.write_config("ERROR_QA_append = \" packages-list\"") + + self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"') + res = bitbake("xcursor-transparent-theme", ignore_status=True) + self.delete_recipeinc('xcursor-transparent-theme') + self.assertTrue("ERROR: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." 
in res.output) + self.assertEqual(res.status, 1) + self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"') + self.append_config('ERROR_QA_remove = "packages-list"') + self.append_config('WARN_QA_append = " packages-list"') + res = bitbake("xcursor-transparent-theme") + bitbake("xcursor-transparent-theme -ccleansstate") + self.delete_recipeinc('xcursor-transparent-theme') + self.assertTrue("WARNING: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in res.output) + + def test_sanity_userspace_dependency(self): + self.append_config('WARN_QA_append = " unsafe-references-in-binaries unsafe-references-in-scripts"') + bitbake("-ccleansstate gzip nfs-utils") + res = bitbake("gzip nfs-utils") + self.assertTrue("WARNING: QA Issue: gzip" in res.output) + self.assertTrue("WARNING: QA Issue: nfs-utils" in res.output) + +class BuildhistoryTests(BuildhistoryBase): + + def test_buildhistory_basic(self): + self.run_buildhistory_operation('xcursor-transparent-theme') + self.assertTrue(os.path.isdir(get_bb_var('BUILDHISTORY_DIR'))) + + def test_buildhistory_buildtime_pr_backwards(self): + self.add_command_to_tearDown('cleanup-workdir') + target = 'xcursor-transparent-theme' + error = "ERROR: QA Issue: Package version for package %s went backwards which would break package feeds from (.*-r1 to .*-r0)" % target + self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True) + self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=error) + + + + + + diff --git a/meta/lib/oeqa/selftest/oescripts.py b/meta/lib/oeqa/selftest/oescripts.py new file mode 100644 index 0000000000..4aab2ed095 --- /dev/null +++ b/meta/lib/oeqa/selftest/oescripts.py @@ -0,0 +1,60 @@ +import datetime +import unittest +import os +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.selftest.buildhistory import BuildhistoryBase +from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer + +class TestScripts(oeSelfTest): + + def test_cleanup_workdir(self): + path = os.path.dirname(get_bb_var('WORKDIR', 'gzip')) + old_version_recipe = os.path.join(get_bb_var('COREBASE'), 'meta/recipes-extended/gzip/gzip_1.3.12.bb') + old_version = '1.3.12' + bitbake("-ccleansstate gzip") + bitbake("-ccleansstate -b %s" % old_version_recipe) + if os.path.exists(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)): + shutil.rmtree(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)) + if os.path.exists(get_bb_var('WORKDIR', 'gzip')): + shutil.rmtree(get_bb_var('WORKDIR', 'gzip')) + + if os.path.exists(path): + initial_contents = os.listdir(path) + else: + initial_contents = [] + + bitbake('gzip') + intermediary_contents = os.listdir(path) + bitbake("-b %s" % old_version_recipe) + runCmd('cleanup-workdir') + remaining_contents = os.listdir(path) + + expected_contents = [x for x in intermediary_contents if x not in initial_contents] + remaining_not_expected = [x for x in remaining_contents if x not in expected_contents] + self.assertFalse(remaining_not_expected, msg="Not all necessary content has been deleted from %s: %s" % (path, ', '.join(map(str, remaining_not_expected)))) + expected_not_remaining = [x for x in expected_contents if x not in remaining_contents] + self.assertFalse(expected_not_remaining, msg="The script removed extra contents from %s: %s" % (path, ', '.join(map(str, 
expected_not_remaining))) + +class BuildhistoryDiffTests(BuildhistoryBase): + + def test_buildhistory_diff(self): + self.add_command_to_tearDown('cleanup-workdir') + target = 'xcursor-transparent-theme' + self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True) + self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True) + result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR')) + expected_output = 'PR changed from "r1" to "r0"' + self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output) + + + + + + + + + diff --git a/meta/lib/oeqa/selftest/prservice.py b/meta/lib/oeqa/selftest/prservice.py new file mode 100644 index 0000000000..789c05f1e5 --- /dev/null +++ b/meta/lib/oeqa/selftest/prservice.py @@ -0,0 +1,113 @@ +import unittest +import os +import logging +import re +import shutil +import datetime + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var + +class BitbakePrTests(oeSelfTest): + + def get_pr_version(self, package_name): + pkgdata_dir = get_bb_var('PKGDATA_DIR') + package_data_file = os.path.join(pkgdata_dir, 'runtime', package_name) + package_data = ftools.read_file(package_data_file) + find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", package_data) + self.assertTrue(find_pr) + return int(find_pr.group(1)) + + def get_task_stamp(self, package_name, recipe_task): + stampdata = get_bb_var('STAMP', target=package_name).split('/') + prefix = stampdata[-1] + package_stamps_path = "/".join(stampdata[:-1]) + stamps = [] + for stamp in os.listdir(package_stamps_path): + find_stamp = re.match("%s\.%s\.([a-z0-9]{32})" % (prefix, recipe_task), stamp) + if find_stamp: + stamps.append(find_stamp.group(1)) + self.assertFalse(len(stamps) == 0, msg="Could not find stamp for task %s for recipe %s" % (recipe_task, package_name)) + self.assertFalse(len(stamps) > 1, msg="Found multiple %s stamps for the %s recipe in the %s directory." 
% (recipe_task, package_name, package_stamps_path)) + return str(stamps[0]) + + def increment_package_pr(self, package_name): + inc_data = "do_package_append() {\nbb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now() + self.write_recipeinc(package_name, inc_data) + bitbake("-ccleansstate %s" % package_name) + res = bitbake(package_name, ignore_status=True) + self.delete_recipeinc(package_name) + self.assertEqual(res.status, 0, msg=res.output) + self.assertTrue("NOTE: Started PRServer with DBfile" in res.output, msg=res.output) + + def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'): + config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type + self.write_config(config_package_data) + config_server_data = 'PRSERV_HOST = "%s"' % pr_socket + self.append_config(config_server_data) + + def run_test_pr_service(self, package_name, package_type='rpm', track_task='do_package', pr_socket='localhost:0'): + self.config_pr_tests(package_name, package_type, pr_socket) + + self.increment_package_pr(package_name) + pr_1 = self.get_pr_version(package_name) + stamp_1 = self.get_task_stamp(package_name, track_task) + + self.increment_package_pr(package_name) + pr_2 = self.get_pr_version(package_name) + stamp_2 = self.get_task_stamp(package_name, track_task) + + bitbake("-ccleansstate %s" % package_name) + self.assertTrue(pr_2 - pr_1 == 1) + self.assertTrue(stamp_1 != stamp_2) + + def run_test_pr_export_import(self, package_name, replace_current_db=True): + self.config_pr_tests(package_name) + + self.increment_package_pr(package_name) + pr_1 = self.get_pr_version(package_name) + + exported_db_path = os.path.join(self.builddir, 'export.inc') + export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True) + self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output) + + if replace_current_db: + current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3') + self.assertTrue(os.path.exists(current_db_path), msg="Path to current PR Service database is invalid: %s" % current_db_path) + os.remove(current_db_path) + + import_result = runCmd("bitbake-prserv-tool import %s" % exported_db_path, ignore_status=True) + os.remove(exported_db_path) + self.assertEqual(import_result.status, 0, msg="PR Service database import failed: %s" % import_result.output) + + self.increment_package_pr(package_name) + pr_2 = self.get_pr_version(package_name) + + bitbake("-ccleansstate %s" % package_name) + self.assertTrue(pr_2 - pr_1 == 1) + + + def test_import_export_replace_db(self): + self.run_test_pr_export_import('m4') + + def test_import_export_override_db(self): + self.run_test_pr_export_import('m4', replace_current_db=False) + + def test_pr_service_rpm_arch_dep(self): + self.run_test_pr_service('m4', 'rpm', 'do_package') + + def test_pr_service_deb_arch_dep(self): + self.run_test_pr_service('m4', 'deb', 'do_package') + + def test_pr_service_ipk_arch_dep(self): + self.run_test_pr_service('m4', 'ipk', 'do_package') + + def test_pr_service_rpm_arch_indep(self): + self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package') + + def test_pr_service_deb_arch_indep(self): + self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package') + + def test_pr_service_ipk_arch_indep(self): + self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package') diff --git 
a/meta/lib/oeqa/selftest/sstate.py b/meta/lib/oeqa/selftest/sstate.py new file mode 100644 index 0000000000..5989724432 --- /dev/null +++ b/meta/lib/oeqa/selftest/sstate.py @@ -0,0 +1,53 @@ +import datetime +import unittest +import os +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer + + +class SStateBase(oeSelfTest): + + def setUpLocal(self): + self.temp_sstate_location = None + self.sstate_path = get_bb_var('SSTATE_DIR') + self.distro = get_bb_var('NATIVELSBSTRING') + self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro) + + # Creates a special sstate configuration with the option to add sstate mirrors + def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]): + self.temp_sstate_location = temp_sstate_location + + if self.temp_sstate_location: + temp_sstate_path = os.path.join(self.builddir, "temp_sstate_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S')) + config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path + self.append_config(config_temp_sstate) + self.track_for_cleanup(temp_sstate_path) + self.sstate_path = get_bb_var('SSTATE_DIR') + self.distro = get_bb_var('NATIVELSBSTRING') + self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro) + + if add_local_mirrors: + config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""' + self.append_config(config_set_sstate_if_not_set) + for local_mirror in add_local_mirrors: + self.assertFalse(os.path.join(local_mirror) == os.path.join(self.sstate_path), msg='Cannot add the current sstate path as a sstate mirror') + config_sstate_mirror = "SSTATE_MIRRORS += \"file://.* file:///%s/PATH\"" % local_mirror + self.append_config(config_sstate_mirror) + + # Returns a list containing sstate files + def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True): + result = [] + for root, dirs, files in os.walk(self.sstate_path): + if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.distro, root): + for f in files: + if re.search(filename_regex, f): + result.append(f) + if distro_nonspecific and re.search("%s/[a-z0-9]{2}$" % self.sstate_path, root): + for f in files: + if re.search(filename_regex, f): + result.append(f) + return result diff --git a/meta/lib/oeqa/selftest/sstatetests.py b/meta/lib/oeqa/selftest/sstatetests.py new file mode 100644 index 0000000000..35ff28b04a --- /dev/null +++ b/meta/lib/oeqa/selftest/sstatetests.py @@ -0,0 +1,193 @@ +import datetime +import unittest +import os +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer +from oeqa.selftest.sstate import SStateBase + + +class SStateTests(SStateBase): + + # Test sstate files creation and their location + def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True): + self.config_sstate(temp_sstate_location) + + if self.temp_sstate_location: + bitbake(['-cclean'] + targets) + else: + bitbake(['-ccleansstate'] + targets) + + bitbake(targets) + file_tracker = self.search_sstate('|'.join(map(str, targets)), distro_specific, distro_nonspecific) + if should_pass: + self.assertTrue(file_tracker , msg="Could not find sstate files for: %s" % ', '.join(map(str, targets))) + else: + self.assertTrue(not file_tracker , msg="Found sstate files in the wrong place for: %s" % 
', '.join(map(str, targets))) + + def test_sstate_creation_distro_specific_pass(self): + self.run_test_sstate_creation(['binutils-cross', 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True) + + def test_sstate_creation_distro_specific_fail(self): + self.run_test_sstate_creation(['binutils-cross', 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False) + + def test_sstate_creation_distro_nonspecific_pass(self): + self.run_test_sstate_creation(['eglibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True) + + def test_sstate_creation_distro_nonspecific_fail(self): + self.run_test_sstate_creation(['eglibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False) + + + # Test the sstate files deletion part of the do_cleansstate task + def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True): + self.config_sstate(temp_sstate_location) + + bitbake(['-ccleansstate'] + targets) + + bitbake(targets) + tgz_created = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific) + self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s" % ', '.join(map(str, targets))) + + siginfo_created = self.search_sstate('|'.join(map(str, [s + '.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific) + self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s" % ', '.join(map(str, targets))) + + bitbake(['-ccleansstate'] + targets) + tgz_removed = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific) + self.assertTrue(not tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s" % ', '.join(map(str, targets))) + + def test_cleansstate_task_distro_specific_nonspecific(self): + self.run_test_cleansstate_task(['binutils-cross', 'binutils-native', 'eglibc-initial'], distro_specific=True, distro_nonspecific=True, temp_sstate_location=True) + + def test_cleansstate_task_distro_nonspecific(self): + self.run_test_cleansstate_task(['eglibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True) + + def test_cleansstate_task_distro_specific(self): + self.run_test_cleansstate_task(['binutils-cross', 'binutils-native', 'eglibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True) + + + # Test rebuilding of distro-specific sstate files + def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True): + self.config_sstate(temp_sstate_location) + + bitbake(['-ccleansstate'] + targets) + + bitbake(targets) + self.assertTrue(self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True) == [], msg="Found distro non-specific sstate for: %s" % ', '.join(map(str, targets))) + file_tracker_1 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False) + self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets))) + + self.track_for_cleanup(self.distro_specific_sstate + "_old") + shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old") + shutil.rmtree(self.distro_specific_sstate) + + bitbake(['-cclean'] + targets) 
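+ # The distro-specific sstate files were deleted above, so the rebuild + # below must recreate every one of them; the file lists from the two + # runs are compared afterwards to catch anything missing or renamed.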
+ bitbake(targets) + file_tracker_2 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False) + self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets))) + + not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2] + self.assertTrue(not_recreated == [], msg="The following sstate files were not recreated: %s" % ', '.join(map(str, not_recreated))) + + created_once = [x for x in file_tracker_2 if x not in file_tracker_1] + self.assertTrue(created_once == [], msg="The following sstate files were created only in the second run: %s" % ', '.join(map(str, created_once))) + + def test_rebuild_distro_specific_sstate_cross_native_targets(self): + self.run_test_rebuild_distro_specific_sstate(['binutils-cross', 'binutils-native'], temp_sstate_location=True) + + def test_rebuild_distro_specific_sstate_cross_target(self): + self.run_test_rebuild_distro_specific_sstate(['binutils-cross'], temp_sstate_location=True) + + def test_rebuild_distro_specific_sstate_native_target(self): + self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True) + + + # Test the sstate-cache-management script. Each element in the global_config list is used with the corresponding element in the target_config list + # global_config elements are expected to not generate any sstate files that would be removed by sstate-cache-management.sh (such as changing the value of MACHINE) + def run_test_sstate_cache_management_script(self, target, global_config=[''], target_config=[''], ignore_patterns=[]): + self.assertTrue(global_config) + self.assertTrue(target_config) + self.assertTrue(len(global_config) == len(target_config), msg='Lists global_config and target_config should have the same number of elements') + self.config_sstate(temp_sstate_location=True, add_local_mirrors=[self.sstate_path]) + + # If buildhistory is enabled, we need to disable version-going-backwards QA checks for this test. It may report errors otherwise. + if ('buildhistory' in get_bb_var('USER_CLASSES')) or ('buildhistory' in get_bb_var('INHERIT')): + remove_errors_config = 'ERROR_QA_remove = "version-going-backwards"' + self.append_config(remove_errors_config) + + # For now, this only checks whether random sstate tasks are handled correctly as a group. + # In the future we should add control over what tasks we check for. 
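+ # Each pass of the loop below applies one (global_config, target_config) + # pair, records the resulting SSTATE_PKGARCH and, on the final + # configuration, snapshots the sstate .tgz files created so they can be + # checked against what sstate-cache-management.sh leaves behind.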
+ + sstate_archs_list = [] + expected_remaining_sstate = [] + for idx in range(len(target_config)): + self.append_config(global_config[idx]) + self.append_recipeinc(target, target_config[idx]) + sstate_arch = get_bb_var('SSTATE_PKGARCH', target) + if sstate_arch not in sstate_archs_list: + sstate_archs_list.append(sstate_arch) + if target_config[idx] == target_config[-1]: + target_sstate_before_build = self.search_sstate(target + '.*?\.tgz$') + bitbake("-cclean %s" % target) + result = bitbake(target, ignore_status=True) + if target_config[idx] == target_config[-1]: + target_sstate_after_build = self.search_sstate(target + '.*?\.tgz$') + expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)] + self.remove_config(global_config[idx]) + self.remove_recipeinc(target, target_config[idx]) + self.assertEqual(result.status, 0) + + runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list)))) + actual_remaining_sstate = [x for x in self.search_sstate(target + '.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)] + + actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate] + self.assertFalse(actual_not_expected, msg="Files should have been removed but were not: %s" % ', '.join(map(str, actual_not_expected))) + expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate] + self.assertFalse(expected_not_actual, msg="Extra files were removed: %s" % ', '.join(map(str, expected_not_actual))) + + + def test_sstate_cache_management_script_using_pr_1(self): + global_config = [] + target_config = [] + global_config.append('') + target_config.append('PR = "0"') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) + + def test_sstate_cache_management_script_using_pr_2(self): + global_config = [] + target_config = [] + global_config.append('') + target_config.append('PR = "0"') + global_config.append('') + target_config.append('PR = "1"') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) + + def test_sstate_cache_management_script_using_pr_3(self): + global_config = [] + target_config = [] + global_config.append('MACHINE = "qemux86-64"') + target_config.append('PR = "0"') + global_config.append(global_config[0]) + target_config.append('PR = "1"') + global_config.append('MACHINE = "qemux86"') + target_config.append('PR = "1"') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) + + def test_sstate_cache_management_script_using_machine(self): + global_config = [] + target_config = [] + global_config.append('MACHINE = "qemux86-64"') + target_config.append('') + global_config.append('MACHINE = "qemux86"') + target_config.append('') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) + + + + + + + + diff --git a/meta/lib/oeqa/targetcontrol.py b/meta/lib/oeqa/targetcontrol.py new file mode 100644 index 0000000000..873a66457a --- /dev/null +++ b/meta/lib/oeqa/targetcontrol.py @@ -0,0 +1,175 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# This module is used by testimage.bbclass for setting up and controlling a target machine. 
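+ # + # A rough usage sketch (hypothetical caller; "d" is a populated BitBake datastore + # and the concrete target class is selected via TEST_TARGET): + # + # target = get_target_controller(d) + # target.deploy() + # target.start() + # status, output = target.run('uname -a') + # target.stop()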
+ +import os +import shutil +import subprocess +import bb +import traceback +import sys +from oeqa.utils.sshcontrol import SSHControl +from oeqa.utils.qemurunner import QemuRunner +from oeqa.controllers.testtargetloader import TestTargetLoader +from abc import ABCMeta, abstractmethod + +def get_target_controller(d): + testtarget = d.getVar("TEST_TARGET", True) + # old, simple names + if testtarget == "qemu": + return QemuTarget(d) + elif testtarget == "simpleremote": + return SimpleRemoteTarget(d) + else: + # use the class name + try: + # is it a core class defined here? + controller = getattr(sys.modules[__name__], testtarget) + except AttributeError: + # nope, perhaps a layer defined one + try: + bbpath = d.getVar("BBPATH", True).split(':') + testtargetloader = TestTargetLoader() + controller = testtargetloader.get_controller_module(testtarget, bbpath) + except ImportError as e: + bb.fatal("Failed to import {0} from available controller modules:\n{1}".format(testtarget,traceback.format_exc())) + except AttributeError as e: + bb.fatal("Invalid TEST_TARGET - " + str(e)) + return controller(d) + + +class BaseTarget(object): + + __metaclass__ = ABCMeta + + def __init__(self, d): + self.connection = None + self.ip = None + self.server_ip = None + self.datetime = d.getVar('DATETIME', True) + self.testdir = d.getVar("TEST_LOG_DIR", True) + self.pn = d.getVar("PN", True) + + @abstractmethod + def deploy(self): + + self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime) + sshloglink = os.path.join(self.testdir, "ssh_target_log") + if os.path.islink(sshloglink): + os.unlink(sshloglink) + os.symlink(self.sshlog, sshloglink) + bb.note("SSH log file: %s" % self.sshlog) + + @abstractmethod + def start(self, params=None): + pass + + @abstractmethod + def stop(self): + pass + + @abstractmethod + def restart(self, params=None): + pass + + def run(self, cmd, timeout=None): + return self.connection.run(cmd, timeout) + + def copy_to(self, localpath, remotepath): + return self.connection.copy_to(localpath, remotepath) + + def copy_from(self, remotepath, localpath): + return self.connection.copy_from(remotepath, localpath) + + + +class QemuTarget(BaseTarget): + + def __init__(self, d): + + super(QemuTarget, self).__init__(d) + + self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime) + self.origrootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.ext3') + self.rootfs = os.path.join(self.testdir, d.getVar("IMAGE_LINK_NAME", True) + '-testimage.ext3') + + self.runner = QemuRunner(machine=d.getVar("MACHINE", True), + rootfs=self.rootfs, + tmpdir = d.getVar("TMPDIR", True), + deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True), + display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True), + logfile = self.qemulog, + boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True))) + + def deploy(self): + try: + shutil.copyfile(self.origrootfs, self.rootfs) + except Exception as e: + bb.fatal("Error copying rootfs: %s" % e) + + qemuloglink = os.path.join(self.testdir, "qemu_boot_log") + if os.path.islink(qemuloglink): + os.unlink(qemuloglink) + os.symlink(self.qemulog, qemuloglink) + + bb.note("rootfs file: %s" % self.rootfs) + bb.note("Qemu log file: %s" % self.qemulog) + super(QemuTarget, self).deploy() + + def start(self, params=None): + if self.runner.start(params): + self.ip = self.runner.ip + self.server_ip = self.runner.server_ip + self.connection = SSHControl(ip=self.ip, logfile=self.sshlog) + else: + raise 
bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % self.pn) + + def stop(self): + self.runner.stop() + self.connection = None + self.ip = None + self.server_ip = None + + def restart(self, params=None): + if self.runner.restart(params): + self.ip = self.runner.ip + self.server_ip = self.runner.server_ip + self.connection = SSHControl(ip=self.ip, logfile=self.sshlog) + else: + raise bb.build.FuncFailed("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn) + + +class SimpleRemoteTarget(BaseTarget): + + def __init__(self, d): + super(SimpleRemoteTarget, self).__init__(d) + addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.') + self.ip = addr.split(":")[0] + try: + self.port = addr.split(":")[1] + except IndexError: + self.port = None + bb.note("Target IP: %s" % self.ip) + self.server_ip = d.getVar("TEST_SERVER_IP", True) + if not self.server_ip: + try: + self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1] + except Exception as e: + bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e) + bb.note("Server IP: %s" % self.server_ip) + + def deploy(self): + super(SimpleRemoteTarget, self).deploy() + + def start(self, params=None): + self.connection = SSHControl(self.ip, logfile=self.sshlog, port=self.port) + + def stop(self): + self.connection = None + self.ip = None + self.server_ip = None + + def restart(self, params=None): + pass diff --git a/meta/lib/oeqa/utils/__init__.py b/meta/lib/oeqa/utils/__init__.py new file mode 100644 index 0000000000..8eda92763c --- /dev/null +++ b/meta/lib/oeqa/utils/__init__.py @@ -0,0 +1,3 @@ +# Enable other layers to have modules in the same named directory +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/utils/commands.py b/meta/lib/oeqa/utils/commands.py new file mode 100644 index 0000000000..9b42620610 --- /dev/null +++ b/meta/lib/oeqa/utils/commands.py @@ -0,0 +1,137 @@ +# Copyright (c) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# DESCRIPTION +# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest +# It provides a class and methods for running commands on the host in a convienent way for tests. 
+ + + +import os +import sys +import signal +import subprocess +import threading +import logging + +class Command(object): + def __init__(self, command, bg=False, timeout=None, data=None, **options): + + self.defaultopts = { + "stdout": subprocess.PIPE, + "stderr": subprocess.STDOUT, + "stdin": None, + "shell": False, + "bufsize": -1, + } + + self.cmd = command + self.bg = bg + self.timeout = timeout + self.data = data + + self.options = dict(self.defaultopts) + if isinstance(self.cmd, basestring): + self.options["shell"] = True + if self.data: + self.options['stdin'] = subprocess.PIPE + self.options.update(options) + + self.status = None + self.output = None + self.error = None + self.thread = None + + self.log = logging.getLogger("utils.commands") + + def run(self): + self.process = subprocess.Popen(self.cmd, **self.options) + + def commThread(): + self.output, self.error = self.process.communicate(self.data) + + self.thread = threading.Thread(target=commThread) + self.thread.start() + + self.log.debug("Running command '%s'" % self.cmd) + + if not self.bg: + self.thread.join(self.timeout) + self.stop() + + def stop(self): + if self.thread.isAlive(): + self.process.terminate() + # let's give it more time to terminate gracefully before killing it + self.thread.join(5) + if self.thread.isAlive(): + self.process.kill() + self.thread.join() + + self.output = self.output.rstrip() + self.status = self.process.poll() + + self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status)) + # logging the complete output is insane + # bitbake -e output is really big + # and makes the log file useless + if self.status: + lout = "\n".join(self.output.splitlines()[-20:]) + self.log.debug("Last 20 lines:\n%s" % lout) + + +class Result(object): + pass + +def runCmd(command, ignore_status=False, timeout=None, **options): + + result = Result() + + cmd = Command(command, timeout=timeout, **options) + cmd.run() + + result.command = command + result.status = cmd.status + result.output = cmd.output + result.pid = cmd.process.pid + + if result.status and not ignore_status: + raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output)) + + return result + + +def bitbake(command, ignore_status=False, timeout=None, **options): + if isinstance(command, basestring): + cmd = "bitbake " + command + else: + cmd = [ "bitbake" ] + command + + return runCmd(cmd, ignore_status, timeout, **options) + + +def get_bb_env(target=None): + if target: + return runCmd("bitbake -e %s" % target).output + else: + return runCmd("bitbake -e").output + +def get_bb_var(var, target=None): + val = None + bbenv = get_bb_env(target) + for line in bbenv.splitlines(): + if line.startswith(var + "="): + # split only on the first '=' so values containing '=' survive intact + val = line.split('=', 1)[1] + val = val.replace('\"','') + break + return val + +def get_test_layer(): + layers = get_bb_var("BBLAYERS").split() + testlayer = None + for l in layers: + if "/meta-selftest" in l and os.path.isdir(l): + testlayer = l + break + return testlayer diff --git a/meta/lib/oeqa/utils/decorators.py b/meta/lib/oeqa/utils/decorators.py new file mode 100644 index 0000000000..b99da8d76d --- /dev/null +++ b/meta/lib/oeqa/utils/decorators.py @@ -0,0 +1,50 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# Some custom decorators that can be used by unittests +# Most useful is skipUnlessPassed which can be used for +# creating dependencies between two test methods. 
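+ # + # For example, as used throughout lib/oeqa/runtime: + # + # @skipUnlessPassed('test_ssh') + # def test_something(self): + # ...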
+ +from oeqa.oetest import * + +class skipIfFailure(object): + + def __init__(self,testcase): + self.testcase = testcase + + def __call__(self,f): + def wrapped_f(*args): + if self.testcase in oeTest.testFailures or self.testcase in oeTest.testErrors: + raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase) + return f(*args) + wrapped_f.__name__ = f.__name__ + return wrapped_f + +class skipIfSkipped(object): + + def __init__(self,testcase): + self.testcase = testcase + + def __call__(self,f): + def wrapped_f(*args): + if self.testcase in oeTest.testSkipped: + raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase) + return f(*args) + wrapped_f.__name__ = f.__name__ + return wrapped_f + +class skipUnlessPassed(object): + + def __init__(self,testcase): + self.testcase = testcase + + def __call__(self,f): + def wrapped_f(*args): + if self.testcase in oeTest.testSkipped or \ + self.testcase in oeTest.testFailures or \ + self.testcase in oeTest.testErrors: + raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase) + return f(*args) + wrapped_f.__name__ = f.__name__ + return wrapped_f diff --git a/meta/lib/oeqa/utils/ftools.py b/meta/lib/oeqa/utils/ftools.py new file mode 100644 index 0000000000..64ebe3d217 --- /dev/null +++ b/meta/lib/oeqa/utils/ftools.py @@ -0,0 +1,27 @@ +import os +import re + +def write_file(path, data): + wdata = data.rstrip() + "\n" + with open(path, "w") as f: + f.write(wdata) + +def append_file(path, data): + wdata = data.rstrip() + "\n" + with open(path, "a") as f: + f.write(wdata) + +def read_file(path): + data = None + with open(path) as f: + data = f.read() + return data + +def remove_from_file(path, data): + lines = read_file(path).splitlines() + rmdata = data.strip().splitlines() + for l in rmdata: + for c in range(0, lines.count(l)): + i = lines.index(l) + del lines[i] + write_file(path, "\n".join(lines)) diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py new file mode 100644 index 0000000000..f161a1bddd --- /dev/null +++ b/meta/lib/oeqa/utils/httpserver.py @@ -0,0 +1,33 @@ +import SimpleHTTPServer +import multiprocessing +import os + +class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer): + + def server_start(self, root_dir): + os.chdir(root_dir) + self.serve_forever() + +class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): + + def log_message(self, format_str, *args): + pass + +class HTTPService(object): + + def __init__(self, root_dir, host=''): + self.root_dir = root_dir + self.host = host + self.port = 0 + + def start(self): + self.server = HTTPServer((self.host, self.port), HTTPRequestHandler) + if self.port == 0: + self.port = self.server.server_port + self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir]) + self.process.start() + + def stop(self): + self.server.server_close() + self.process.terminate() + self.process.join() diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py new file mode 100644 index 0000000000..f1a7e24ab7 --- /dev/null +++ b/meta/lib/oeqa/utils/qemurunner.py @@ -0,0 +1,237 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# This module provides a class for starting qemu images using runqemu. +# It's used by testimage.bbclass. 
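+ # + # Rough lifecycle (see QemuTarget in targetcontrol.py for the real caller): + # + # runner = QemuRunner(machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime) + # if runner.start(): # boots the image and waits for the login banner on the serial socket + # ... run commands against runner.ip ... + # runner.stop()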
+ +import subprocess +import os +import time +import signal +import re +import socket +import select +import bb + +class QemuRunner: + + def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime): + + # Popen object for runqemu + self.runqemu = None + # pid of the qemu process that runqemu will start + self.qemupid = None + # target ip - from the command line + self.ip = None + # host ip - where qemu is running + self.server_ip = None + + self.machine = machine + self.rootfs = rootfs + self.display = display + self.tmpdir = tmpdir + self.deploy_dir_image = deploy_dir_image + self.logfile = logfile + self.boottime = boottime + + self.runqemutime = 60 + + self.create_socket() + + + def create_socket(self): + + self.bootlog = '' + self.qemusock = None + + try: + self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.server_socket.setblocking(0) + self.server_socket.bind(("127.0.0.1",0)) + self.server_socket.listen(2) + self.serverport = self.server_socket.getsockname()[1] + bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport) + except socket.error, msg: + self.server_socket.close() + bb.fatal("Failed to create listening socket: %s" % msg[1]) + + + def log(self, msg): + if self.logfile: + with open(self.logfile, "a") as f: + f.write("%s" % msg) + + def start(self, qemuparams = None): + + if self.display: + os.environ["DISPLAY"] = self.display + else: + bb.error("To start qemu I need an X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)") + return False + if not os.path.exists(self.rootfs): + bb.error("Invalid rootfs %s" % self.rootfs) + return False + if not os.path.exists(self.tmpdir): + bb.error("Invalid TMPDIR path %s" % self.tmpdir) + return False + else: + os.environ["OE_TMPDIR"] = self.tmpdir + if not os.path.exists(self.deploy_dir_image): + bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image) + return False + else: + os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image + + # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact + # badly with screensavers. + os.environ["QEMU_DONT_GRAB"] = "1" + self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport + if qemuparams: + self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"' + + launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs, self.qemuparams) + self.runqemu = subprocess.Popen(launch_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,preexec_fn=os.setpgrp) + + bb.note("runqemu started, pid is %s" % self.runqemu.pid) + bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime) + endtime = time.time() + self.runqemutime + while not self.is_alive() and time.time() < endtime: + time.sleep(1) + + if self.is_alive(): + bb.note("qemu started - qemu process pid is %s" % self.qemupid) + cmdline = '' + with open('/proc/%s/cmdline' % self.qemupid) as p: + cmdline = p.read() + ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1]) + if not ips or len(ips) != 3: + bb.note("Couldn't get ip from qemu process arguments! 
diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py
new file mode 100644
index 0000000000..f161a1bddd
--- /dev/null
+++ b/meta/lib/oeqa/utils/httpserver.py
@@ -0,0 +1,33 @@
+import SimpleHTTPServer
+import multiprocessing
+import os
+
+class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer):
+
+    def server_start(self, root_dir):
+        os.chdir(root_dir)
+        self.serve_forever()
+
+class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+
+    def log_message(self, format_str, *args):
+        pass
+
+class HTTPService(object):
+
+    def __init__(self, root_dir, host=''):
+        self.root_dir = root_dir
+        self.host = host
+        self.port = 0
+
+    def start(self):
+        self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
+        if self.port == 0:
+            self.port = self.server.server_port
+        self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir])
+        self.process.start()
+
+    def stop(self):
+        self.server.server_close()
+        self.process.terminate()
+        self.process.join()
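
HTTPService binds port 0 so the kernel picks an ephemeral port, records it, and then serves root_dir from a forked process. Roughly how a test might use it; the directory is a placeholder:

    from oeqa.utils.httpserver import HTTPService

    service = HTTPService("/path/to/deploy/rpm")  # hypothetical directory
    service.start()
    # the target can now fetch files via http://<host ip>:<service.port>/
    print("serving on port %d" % service.port)
    service.stop()
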
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
new file mode 100644
index 0000000000..f1a7e24ab7
--- /dev/null
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -0,0 +1,237 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# This module provides a class for starting qemu images using runqemu.
+# It's used by testimage.bbclass.
+
+import subprocess
+import os
+import time
+import signal
+import re
+import socket
+import select
+import bb
+
+class QemuRunner:
+
+    def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime):
+
+        # Popen object for runqemu
+        self.runqemu = None
+        # pid of the qemu process that runqemu will start
+        self.qemupid = None
+        # target ip - from the command line
+        self.ip = None
+        # host ip - where qemu is running
+        self.server_ip = None
+
+        self.machine = machine
+        self.rootfs = rootfs
+        self.display = display
+        self.tmpdir = tmpdir
+        self.deploy_dir_image = deploy_dir_image
+        self.logfile = logfile
+        self.boottime = boottime
+
+        self.runqemutime = 60
+
+        self.create_socket()
+
+    def create_socket(self):
+
+        self.bootlog = ''
+        self.qemusock = None
+
+        try:
+            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self.server_socket.setblocking(0)
+            self.server_socket.bind(("127.0.0.1", 0))
+            self.server_socket.listen(2)
+            self.serverport = self.server_socket.getsockname()[1]
+            bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
+        except socket.error, msg:
+            self.server_socket.close()
+            bb.fatal("Failed to create listening socket: %s" % msg[1])
+
+    def log(self, msg):
+        if self.logfile:
+            with open(self.logfile, "a") as f:
+                f.write("%s" % msg)
+
+    def start(self, qemuparams=None):
+
+        if self.display:
+            os.environ["DISPLAY"] = self.display
+        else:
+            bb.error("To start qemu I need an X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
+            return False
+        if not os.path.exists(self.rootfs):
+            bb.error("Invalid rootfs %s" % self.rootfs)
+            return False
+        if not os.path.exists(self.tmpdir):
+            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
+            return False
+        else:
+            os.environ["OE_TMPDIR"] = self.tmpdir
+        if not os.path.exists(self.deploy_dir_image):
+            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
+            return False
+        else:
+            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
+
+        # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
+        # badly with screensavers.
+        os.environ["QEMU_DONT_GRAB"] = "1"
+        self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
+        if qemuparams:
+            self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
+
+        launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs, self.qemuparams)
+        self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setpgrp)
+
+        bb.note("runqemu started, pid is %s" % self.runqemu.pid)
+        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
+        endtime = time.time() + self.runqemutime
+        while not self.is_alive() and time.time() < endtime:
+            time.sleep(1)
+
+        if self.is_alive():
+            bb.note("qemu started - qemu process pid is %s" % self.qemupid)
+            cmdline = ''
+            with open('/proc/%s/cmdline' % self.qemupid) as p:
+                cmdline = p.read()
+            ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
+            if not ips or len(ips) != 3:
+                bb.note("Couldn't get ip from qemu process arguments! Here is the qemu command line used: %s" % cmdline)
+                self.stop()
+                return False
+            else:
+                self.ip = ips[0]
+                self.server_ip = ips[1]
+            bb.note("Target IP: %s" % self.ip)
+            bb.note("Server IP: %s" % self.server_ip)
+            bb.note("Waiting at most %d seconds for login banner" % self.boottime)
+            endtime = time.time() + self.boottime
+            socklist = [self.server_socket]
+            reachedlogin = False
+            stopread = False
+            while time.time() < endtime and not stopread:
+                sread, swrite, serror = select.select(socklist, [], [], 5)
+                for sock in sread:
+                    if sock is self.server_socket:
+                        self.qemusock, addr = self.server_socket.accept()
+                        self.qemusock.setblocking(0)
+                        socklist.append(self.qemusock)
+                        socklist.remove(self.server_socket)
+                        bb.note("Connection from %s:%s" % addr)
+                    else:
+                        data = sock.recv(1024)
+                        if data:
+                            self.log(data)
+                            self.bootlog += data
+                            if re.search("qemu.* login:", self.bootlog):
+                                stopread = True
+                                reachedlogin = True
+                                bb.note("Reached login banner")
+                        else:
+                            socklist.remove(sock)
+                            sock.close()
+                            stopread = True
+
+            if not reachedlogin:
+                bb.note("Target didn't reach the login banner within %d seconds" % self.boottime)
+                lines = "\n".join(self.bootlog.splitlines()[-5:])
+                bb.note("Last 5 lines of text:\n%s" % lines)
+                bb.note("Check full boot log: %s" % self.logfile)
+                self.stop()
+                return False
+        else:
+            bb.note("Qemu pid didn't appear within %s seconds" % self.runqemutime)
+            output = self.runqemu.stdout
+            self.stop()
+            bb.note("Output from runqemu:\n%s" % output.read())
+            return False
+
+        return self.is_alive()
+
+    def stop(self):
+
+        if self.runqemu:
+            bb.note("Sending SIGTERM to runqemu")
+            os.killpg(self.runqemu.pid, signal.SIGTERM)
+            endtime = time.time() + self.runqemutime
+            while self.runqemu.poll() is None and time.time() < endtime:
+                time.sleep(1)
+            if self.runqemu.poll() is None:
+                bb.note("Sending SIGKILL to runqemu")
+                os.killpg(self.runqemu.pid, signal.SIGKILL)
+            self.runqemu = None
+        if self.server_socket:
+            self.server_socket.close()
+            self.server_socket = None
+        self.qemupid = None
+        self.ip = None
+
+    def restart(self, qemuparams=None):
+        bb.note("Restarting qemu process")
+        if self.runqemu.poll() is None:
+            self.stop()
+        self.create_socket()
+        if self.start(qemuparams):
+            return True
+        return False
+
+    def is_alive(self):
+        qemu_child = self.find_child(str(self.runqemu.pid))
+        if qemu_child:
+            self.qemupid = qemu_child[0]
+            if os.path.exists("/proc/" + str(self.qemupid)):
+                return True
+        return False
+
+    def find_child(self, parent_pid):
+        #
+        # Walk the process tree from the process specified looking for a qemu-system. Return its [pid, cmd]
+        #
+        ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
+        processes = ps.split('\n')
+        nfields = len(processes[0].split()) - 1
+        pids = {}
+        commands = {}
+        for row in processes[1:]:
+            data = row.split(None, nfields)
+            if len(data) != 3:
+                continue
+            if data[1] not in pids:
+                pids[data[1]] = []
+
+            pids[data[1]].append(data[0])
+            commands[data[0]] = data[2]
+
+        if parent_pid not in pids:
+            return []
+
+        parents = []
+        newparents = pids[parent_pid]
+        while newparents:
+            nextparents = []
+            for p in newparents:
+                if p in pids:
+                    for n in pids[p]:
+                        if n not in parents and n not in nextparents:
+                            nextparents.append(n)
+                if p not in parents:
+                    parents.append(p)
+            newparents = nextparents
+        #print "Children matching %s:" % str(parents)
+        for p in parents:
+            # Need to be careful here since runqemu-internal runs "ldd qemu-system-xxxx"
+            # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
+            basecmd = commands[p].split()[0]
+            basecmd = os.path.basename(basecmd)
+            if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
+                return [int(p), commands[p]]
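
A rough sketch of the lifecycle testimage.bbclass drives with this class; every path and value below is a hypothetical placeholder:

    from oeqa.utils.qemurunner import QemuRunner

    qemu = QemuRunner(machine="qemux86",
                      rootfs="/path/to/core-image-minimal-qemux86.ext3",
                      display=":1",
                      tmpdir="/path/to/build/tmp",
                      deploy_dir_image="/path/to/deploy/images",
                      logfile="/tmp/qemu_boot.log",
                      boottime=300)
    try:
        # start() blocks until the login banner appears on the serial
        # console, or the boottime/runqemutime limits expire
        if qemu.start():
            print("Target is up at %s (server side: %s)" % (qemu.ip, qemu.server_ip))
    finally:
        qemu.stop()
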
diff --git a/meta/lib/oeqa/utils/sshcontrol.py b/meta/lib/oeqa/utils/sshcontrol.py
new file mode 100644
index 0000000000..d355d5e8e9
--- /dev/null
+++ b/meta/lib/oeqa/utils/sshcontrol.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Provides a class for setting up ssh connections,
+# running commands and copying files to/from a target.
+# It's used by testimage.bbclass and tests in lib/oeqa/runtime.
+
+import subprocess
+import time
+import os
+import select
+
+
+class SSHProcess(object):
+    def __init__(self, **options):
+
+        self.defaultopts = {
+            "stdout": subprocess.PIPE,
+            "stderr": subprocess.STDOUT,
+            "stdin": None,
+            "shell": False,
+            "bufsize": -1,
+            "preexec_fn": os.setsid,
+        }
+        self.options = dict(self.defaultopts)
+        self.options.update(options)
+        self.status = None
+        self.output = None
+        self.process = None
+        self.starttime = None
+
+    def run(self, command, timeout=None):
+        self.starttime = time.time()
+        output = ''
+        self.process = subprocess.Popen(command, **self.options)
+        if timeout:
+            endtime = self.starttime + timeout
+            eof = False
+            while time.time() < endtime and not eof:
+                if select.select([self.process.stdout], [], [], 5)[0] != []:
+                    data = os.read(self.process.stdout.fileno(), 1024)
+                    if not data:
+                        self.process.stdout.close()
+                        eof = True
+                    else:
+                        output += data
+                        endtime = time.time() + timeout
+
+            # process hasn't returned yet
+            if not eof:
+                self.process.terminate()
+                time.sleep(5)
+                try:
+                    self.process.kill()
+                except OSError:
+                    pass
+                output += "\nProcess killed - no output for %d seconds. Total running time: %d seconds." % (timeout, time.time() - self.starttime)
+        else:
+            output = self.process.communicate()[0]
+
+        self.status = self.process.wait()
+        self.output = output.rstrip()
+        return (self.status, self.output)
+
+
+class SSHControl(object):
+    def __init__(self, ip, logfile=None, timeout=300, user='root', port=None):
+        self.ip = ip
+        self.defaulttimeout = timeout
+        self.ignore_status = True
+        self.logfile = logfile
+        self.user = user
+        self.ssh_options = [
+            '-o', 'UserKnownHostsFile=/dev/null',
+            '-o', 'StrictHostKeyChecking=no',
+            '-o', 'LogLevel=ERROR'
+        ]
+        self.ssh = ['ssh', '-l', self.user] + self.ssh_options
+        self.scp = ['scp'] + self.ssh_options
+        if port:
+            self.ssh = self.ssh + ['-p', port]
+            self.scp = self.scp + ['-P', port]
+
+    def log(self, msg):
+        if self.logfile:
+            with open(self.logfile, "a") as f:
+                f.write("%s\n" % msg)
+
+    def _internal_run(self, command, timeout=None, ignore_status=True):
+        self.log("[Running]$ %s" % " ".join(command))
+
+        proc = SSHProcess()
+        status, output = proc.run(command, timeout)
+
+        self.log("%s" % output)
+        self.log("[Command returned '%d' after %.2f seconds]" % (status, time.time() - proc.starttime))
+
+        if status and not ignore_status:
+            raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, status, output))
+
+        return (status, output)
+
+    def run(self, command, timeout=None):
+        """
+        command - ssh command to run
+        timeout=<val> - kill command if there is no output after <val> seconds
+        timeout=None - kill command if there is no output after a default number of seconds
+        timeout=0 - no timeout, let command run until it returns
+        """
+
+        # We need to source /etc/profile for a proper PATH on the target
+        command = self.ssh + [self.ip, ' . /etc/profile; ' + command]
+
+        if timeout is None:
+            return self._internal_run(command, self.defaulttimeout, self.ignore_status)
+        if timeout == 0:
+            return self._internal_run(command, None, self.ignore_status)
+        return self._internal_run(command, timeout, self.ignore_status)
+
+    def copy_to(self, localpath, remotepath):
+        command = self.scp + [localpath, '%s@%s:%s' % (self.user, self.ip, remotepath)]
+        return self._internal_run(command, ignore_status=False)
+
+    def copy_from(self, remotepath, localpath):
+        command = self.scp + ['%s@%s:%s' % (self.user, self.ip, remotepath), localpath]
+        return self._internal_run(command, ignore_status=False)
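
A short sketch of driving a booted target over ssh; the IP would normally come from QemuRunner above, and all values here are placeholders:

    from oeqa.utils.sshcontrol import SSHControl

    target = SSHControl(ip="192.168.7.2", logfile="/tmp/ssh_target.log", timeout=300)
    # killed if silent for the default 300 seconds
    (status, output) = target.run("uname -a")
    # timeout=0: let the command run to completion, however long it takes
    (status, output) = target.run("tar xf big.tar", 0)
    target.copy_to("/tmp/test.py", "/home/root/test.py")
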
diff --git a/meta/lib/oeqa/utils/targetbuild.py b/meta/lib/oeqa/utils/targetbuild.py
new file mode 100644
index 0000000000..32296762c0
--- /dev/null
+++ b/meta/lib/oeqa/utils/targetbuild.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Provides a class for automating build tests for projects
+
+import os
+import re
+import subprocess
+
+
+class TargetBuildProject():
+
+    def __init__(self, target, d, uri, foldername=None):
+        self.target = target
+        self.d = d
+        self.uri = uri
+        self.targetdir = "~/"
+        self.archive = os.path.basename(uri)
+        self.localarchive = "/tmp/" + self.archive
+        # strip a trailing .tar.bz2/.tar.gz extension to get the project folder name
+        self.fname = re.sub(r'\.tar\.bz2$|\.tar\.gz$', '', self.archive)
+        if foldername:
+            self.fname = foldername
+
+    def download_archive(self):
+
+        exportvars = ['HTTP_PROXY', 'http_proxy',
+                      'HTTPS_PROXY', 'https_proxy',
+                      'FTP_PROXY', 'ftp_proxy',
+                      'FTPS_PROXY', 'ftps_proxy',
+                      'NO_PROXY', 'no_proxy',
+                      'ALL_PROXY', 'all_proxy',
+                      'SOCKS5_USER', 'SOCKS5_PASSWD']
+
+        cmd = ''
+        for var in exportvars:
+            val = self.d.getVar(var, True)
+            if val:
+                cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
+
+        cmd = cmd + "wget -O %s %s" % (self.localarchive, self.uri)
+        subprocess.check_call(cmd, shell=True)
+
+        (status, output) = self.target.copy_to(self.localarchive, self.targetdir)
+        if status != 0:
+            raise Exception("Failed to copy archive to target, output: %s" % output)
+
+        (status, output) = self.target.run('tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir))
+        if status != 0:
+            raise Exception("Failed to extract archive, output: %s" % output)
+
+        # Change targetdir to the project folder
+        self.targetdir = self.targetdir + self.fname
+
+    # The timeout parameter of target.run is set to 0 to make the ssh command
+    # run with no timeout.
+    def run_configure(self):
+        return self.target.run('cd %s; ./configure' % self.targetdir, 0)[0]
+
+    def run_make(self):
+        return self.target.run('cd %s; make' % self.targetdir, 0)[0]
+
+    def run_install(self):
+        return self.target.run('cd %s; make install' % self.targetdir, 0)[0]
+
+    def clean(self):
+        self.target.run('rm -rf %s' % self.targetdir)
+        subprocess.call('rm -f %s' % self.localarchive, shell=True)
--
cgit v1.2.3-54-g00ecf
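
As a closing illustration of how the pieces compose: TargetBuildProject fetches a tarball on the build host, pushes it to the target via an SSHControl-style object, and builds it there. The URI and BitBake datastore `d` below are placeholders, not values from this patch:

    from oeqa.utils.targetbuild import TargetBuildProject

    # target is an SSHControl instance; d is the BitBake datastore
    project = TargetBuildProject(target, d, "http://example.com/sudoku-savant-1.3.tar.bz2")
    project.download_archive()
    assert project.run_configure() == 0
    assert project.run_make() == 0
    project.clean()
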