From 972dcfcdbfe75dcfeb777150c136576cf1a71e99 Mon Sep 17 00:00:00 2001 From: Tudor Florea Date: Fri, 9 Oct 2015 22:59:03 +0200 Subject: initial commit for Enea Linux 5.0 arm Signed-off-by: Tudor Florea --- meta/lib/oe/__init__.py | 2 + meta/lib/oe/buildhistory_analysis.py | 456 +++++++ meta/lib/oe/cachedpath.py | 233 ++++ meta/lib/oe/classextend.py | 118 ++ meta/lib/oe/classutils.py | 43 + meta/lib/oe/data.py | 17 + meta/lib/oe/distro_check.py | 383 ++++++ meta/lib/oe/image.py | 345 +++++ meta/lib/oe/license.py | 116 ++ meta/lib/oe/lsb.py | 81 ++ meta/lib/oe/maketype.py | 99 ++ meta/lib/oe/manifest.py | 345 +++++ meta/lib/oe/package.py | 99 ++ meta/lib/oe/package_manager.py | 1797 +++++++++++++++++++++++++ meta/lib/oe/packagedata.py | 94 ++ meta/lib/oe/packagegroup.py | 36 + meta/lib/oe/patch.py | 447 ++++++ meta/lib/oe/path.py | 243 ++++ meta/lib/oe/prservice.py | 126 ++ meta/lib/oe/qa.py | 111 ++ meta/lib/oe/rootfs.py | 800 +++++++++++ meta/lib/oe/sdk.py | 326 +++++ meta/lib/oe/sstatesig.py | 276 ++++ meta/lib/oe/terminal.py | 208 +++ meta/lib/oe/tests/__init__.py | 0 meta/lib/oe/tests/test_license.py | 68 + meta/lib/oe/tests/test_path.py | 89 ++ meta/lib/oe/tests/test_types.py | 62 + meta/lib/oe/tests/test_utils.py | 51 + meta/lib/oe/types.py | 153 +++ meta/lib/oe/utils.py | 182 +++ meta/lib/oeqa/__init__.py | 0 meta/lib/oeqa/controllers/__init__.py | 3 + meta/lib/oeqa/controllers/masterimage.py | 201 +++ meta/lib/oeqa/controllers/testtargetloader.py | 70 + meta/lib/oeqa/oetest.py | 106 ++ meta/lib/oeqa/runexported.py | 140 ++ meta/lib/oeqa/runtime/__init__.py | 3 + meta/lib/oeqa/runtime/_ptest.py | 124 ++ meta/lib/oeqa/runtime/buildcvs.py | 31 + meta/lib/oeqa/runtime/buildiptables.py | 31 + meta/lib/oeqa/runtime/buildsudoku.py | 28 + meta/lib/oeqa/runtime/connman.py | 30 + meta/lib/oeqa/runtime/date.py | 23 + meta/lib/oeqa/runtime/df.py | 12 + meta/lib/oeqa/runtime/dmesg.py | 12 + meta/lib/oeqa/runtime/files/hellomod.c | 19 + 
meta/lib/oeqa/runtime/files/hellomod_makefile | 8 + meta/lib/oeqa/runtime/files/test.c | 26 + meta/lib/oeqa/runtime/files/test.cpp | 3 + meta/lib/oeqa/runtime/files/test.pl | 2 + meta/lib/oeqa/runtime/files/test.py | 6 + meta/lib/oeqa/runtime/files/testmakefile | 5 + meta/lib/oeqa/runtime/gcc.py | 46 + meta/lib/oeqa/runtime/kernelmodule.py | 34 + meta/lib/oeqa/runtime/ldd.py | 20 + meta/lib/oeqa/runtime/logrotate.py | 28 + meta/lib/oeqa/runtime/multilib.py | 18 + meta/lib/oeqa/runtime/pam.py | 25 + meta/lib/oeqa/runtime/parselogs.py | 178 +++ meta/lib/oeqa/runtime/perl.py | 29 + meta/lib/oeqa/runtime/ping.py | 20 + meta/lib/oeqa/runtime/python.py | 34 + meta/lib/oeqa/runtime/rpm.py | 53 + meta/lib/oeqa/runtime/scanelf.py | 28 + meta/lib/oeqa/runtime/scp.py | 22 + meta/lib/oeqa/runtime/skeletoninit.py | 29 + meta/lib/oeqa/runtime/smart.py | 121 ++ meta/lib/oeqa/runtime/ssh.py | 19 + meta/lib/oeqa/runtime/syslog.py | 48 + meta/lib/oeqa/runtime/systemd.py | 88 ++ meta/lib/oeqa/runtime/vnc.py | 20 + meta/lib/oeqa/runtime/x32lib.py | 18 + meta/lib/oeqa/runtime/xorg.py | 17 + meta/lib/oeqa/sdk/__init__.py | 3 + meta/lib/oeqa/sdk/buildcvs.py | 25 + meta/lib/oeqa/sdk/buildiptables.py | 26 + meta/lib/oeqa/sdk/buildsudoku.py | 26 + meta/lib/oeqa/selftest/__init__.py | 2 + meta/lib/oeqa/selftest/_sstatetests_noauto.py | 95 ++ meta/lib/oeqa/selftest/_toaster.py | 445 ++++++ meta/lib/oeqa/selftest/base.py | 131 ++ meta/lib/oeqa/selftest/bblayers.py | 43 + meta/lib/oeqa/selftest/bbtests.py | 178 +++ meta/lib/oeqa/selftest/buildhistory.py | 45 + meta/lib/oeqa/selftest/buildoptions.py | 120 ++ meta/lib/oeqa/selftest/oescripts.py | 54 + meta/lib/oeqa/selftest/prservice.py | 121 ++ meta/lib/oeqa/selftest/sstate.py | 53 + meta/lib/oeqa/selftest/sstatetests.py | 204 +++ meta/lib/oeqa/targetcontrol.py | 199 +++ meta/lib/oeqa/utils/__init__.py | 15 + meta/lib/oeqa/utils/commands.py | 154 +++ meta/lib/oeqa/utils/decorators.py | 158 +++ meta/lib/oeqa/utils/ftools.py | 27 + 
meta/lib/oeqa/utils/httpserver.py | 35 + meta/lib/oeqa/utils/logparser.py | 125 ++ meta/lib/oeqa/utils/qemurunner.py | 237 ++++ meta/lib/oeqa/utils/sshcontrol.py | 138 ++ meta/lib/oeqa/utils/targetbuild.py | 132 ++ 100 files changed, 11975 insertions(+) create mode 100644 meta/lib/oe/__init__.py create mode 100644 meta/lib/oe/buildhistory_analysis.py create mode 100644 meta/lib/oe/cachedpath.py create mode 100644 meta/lib/oe/classextend.py create mode 100644 meta/lib/oe/classutils.py create mode 100644 meta/lib/oe/data.py create mode 100644 meta/lib/oe/distro_check.py create mode 100644 meta/lib/oe/image.py create mode 100644 meta/lib/oe/license.py create mode 100644 meta/lib/oe/lsb.py create mode 100644 meta/lib/oe/maketype.py create mode 100644 meta/lib/oe/manifest.py create mode 100644 meta/lib/oe/package.py create mode 100644 meta/lib/oe/package_manager.py create mode 100644 meta/lib/oe/packagedata.py create mode 100644 meta/lib/oe/packagegroup.py create mode 100644 meta/lib/oe/patch.py create mode 100644 meta/lib/oe/path.py create mode 100644 meta/lib/oe/prservice.py create mode 100644 meta/lib/oe/qa.py create mode 100644 meta/lib/oe/rootfs.py create mode 100644 meta/lib/oe/sdk.py create mode 100644 meta/lib/oe/sstatesig.py create mode 100644 meta/lib/oe/terminal.py create mode 100644 meta/lib/oe/tests/__init__.py create mode 100644 meta/lib/oe/tests/test_license.py create mode 100644 meta/lib/oe/tests/test_path.py create mode 100644 meta/lib/oe/tests/test_types.py create mode 100644 meta/lib/oe/tests/test_utils.py create mode 100644 meta/lib/oe/types.py create mode 100644 meta/lib/oe/utils.py create mode 100644 meta/lib/oeqa/__init__.py create mode 100644 meta/lib/oeqa/controllers/__init__.py create mode 100644 meta/lib/oeqa/controllers/masterimage.py create mode 100644 meta/lib/oeqa/controllers/testtargetloader.py create mode 100644 meta/lib/oeqa/oetest.py create mode 100755 meta/lib/oeqa/runexported.py create mode 100644 meta/lib/oeqa/runtime/__init__.py 
create mode 100644 meta/lib/oeqa/runtime/_ptest.py create mode 100644 meta/lib/oeqa/runtime/buildcvs.py create mode 100644 meta/lib/oeqa/runtime/buildiptables.py create mode 100644 meta/lib/oeqa/runtime/buildsudoku.py create mode 100644 meta/lib/oeqa/runtime/connman.py create mode 100644 meta/lib/oeqa/runtime/date.py create mode 100644 meta/lib/oeqa/runtime/df.py create mode 100644 meta/lib/oeqa/runtime/dmesg.py create mode 100644 meta/lib/oeqa/runtime/files/hellomod.c create mode 100644 meta/lib/oeqa/runtime/files/hellomod_makefile create mode 100644 meta/lib/oeqa/runtime/files/test.c create mode 100644 meta/lib/oeqa/runtime/files/test.cpp create mode 100644 meta/lib/oeqa/runtime/files/test.pl create mode 100644 meta/lib/oeqa/runtime/files/test.py create mode 100644 meta/lib/oeqa/runtime/files/testmakefile create mode 100644 meta/lib/oeqa/runtime/gcc.py create mode 100644 meta/lib/oeqa/runtime/kernelmodule.py create mode 100644 meta/lib/oeqa/runtime/ldd.py create mode 100644 meta/lib/oeqa/runtime/logrotate.py create mode 100644 meta/lib/oeqa/runtime/multilib.py create mode 100644 meta/lib/oeqa/runtime/pam.py create mode 100644 meta/lib/oeqa/runtime/parselogs.py create mode 100644 meta/lib/oeqa/runtime/perl.py create mode 100644 meta/lib/oeqa/runtime/ping.py create mode 100644 meta/lib/oeqa/runtime/python.py create mode 100644 meta/lib/oeqa/runtime/rpm.py create mode 100644 meta/lib/oeqa/runtime/scanelf.py create mode 100644 meta/lib/oeqa/runtime/scp.py create mode 100644 meta/lib/oeqa/runtime/skeletoninit.py create mode 100644 meta/lib/oeqa/runtime/smart.py create mode 100644 meta/lib/oeqa/runtime/ssh.py create mode 100644 meta/lib/oeqa/runtime/syslog.py create mode 100644 meta/lib/oeqa/runtime/systemd.py create mode 100644 meta/lib/oeqa/runtime/vnc.py create mode 100644 meta/lib/oeqa/runtime/x32lib.py create mode 100644 meta/lib/oeqa/runtime/xorg.py create mode 100644 meta/lib/oeqa/sdk/__init__.py create mode 100644 meta/lib/oeqa/sdk/buildcvs.py create mode 
100644 meta/lib/oeqa/sdk/buildiptables.py create mode 100644 meta/lib/oeqa/sdk/buildsudoku.py create mode 100644 meta/lib/oeqa/selftest/__init__.py create mode 100644 meta/lib/oeqa/selftest/_sstatetests_noauto.py create mode 100644 meta/lib/oeqa/selftest/_toaster.py create mode 100644 meta/lib/oeqa/selftest/base.py create mode 100644 meta/lib/oeqa/selftest/bblayers.py create mode 100644 meta/lib/oeqa/selftest/bbtests.py create mode 100644 meta/lib/oeqa/selftest/buildhistory.py create mode 100644 meta/lib/oeqa/selftest/buildoptions.py create mode 100644 meta/lib/oeqa/selftest/oescripts.py create mode 100644 meta/lib/oeqa/selftest/prservice.py create mode 100644 meta/lib/oeqa/selftest/sstate.py create mode 100644 meta/lib/oeqa/selftest/sstatetests.py create mode 100644 meta/lib/oeqa/targetcontrol.py create mode 100644 meta/lib/oeqa/utils/__init__.py create mode 100644 meta/lib/oeqa/utils/commands.py create mode 100644 meta/lib/oeqa/utils/decorators.py create mode 100644 meta/lib/oeqa/utils/ftools.py create mode 100644 meta/lib/oeqa/utils/httpserver.py create mode 100644 meta/lib/oeqa/utils/logparser.py create mode 100644 meta/lib/oeqa/utils/qemurunner.py create mode 100644 meta/lib/oeqa/utils/sshcontrol.py create mode 100644 meta/lib/oeqa/utils/targetbuild.py (limited to 'meta/lib') diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py new file mode 100644 index 0000000000..3ad9513f40 --- /dev/null +++ b/meta/lib/oe/__init__.py @@ -0,0 +1,2 @@ +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py new file mode 100644 index 0000000000..5395c768a3 --- /dev/null +++ b/meta/lib/oe/buildhistory_analysis.py @@ -0,0 +1,456 @@ +# Report significant differences in the buildhistory repository since a specific revision +# +# Copyright (C) 2012 Intel Corporation +# Author: Paul Eggleton +# +# Note: requires GitPython 0.3.1+ +# +# You can use this from the 
command line by running scripts/buildhistory-diff +# + +import sys +import os.path +import difflib +import git +import re +import bb.utils + + +# How to display fields +list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE'] +list_order_fields = ['PACKAGES'] +defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'} +numeric_fields = ['PKGSIZE', 'IMAGESIZE'] +# Fields to monitor +monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG'] +ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR'] +# Percentage change to alert for numeric fields +monitor_numeric_threshold = 10 +# Image files to monitor (note that image-info.txt is handled separately) +img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt'] +# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields) +related_fields = {} +related_fields['RDEPENDS'] = ['DEPENDS'] +related_fields['RRECOMMENDS'] = ['DEPENDS'] +related_fields['FILELIST'] = ['FILES'] +related_fields['PKGSIZE'] = ['FILELIST'] +related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND'] +related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE'] + + +class ChangeRecord: + def __init__(self, path, fieldname, oldvalue, newvalue, monitored): + self.path = path + self.fieldname = fieldname + self.oldvalue = oldvalue + self.newvalue = newvalue + self.monitored = monitored + self.related = [] + self.filechanges = None + + def __str__(self): + return self._str_internal(True) + + def 
_str_internal(self, outer): + if outer: + if '/image-files/' in self.path: + prefix = '%s: ' % self.path.split('/image-files/')[0] + else: + prefix = '%s: ' % self.path + else: + prefix = '' + + def pkglist_combine(depver): + pkglist = [] + for k,v in depver.iteritems(): + if v: + pkglist.append("%s (%s)" % (k,v)) + else: + pkglist.append(k) + return pkglist + + if self.fieldname in list_fields or self.fieldname in list_order_fields: + if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']: + (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue) + aitems = pkglist_combine(depvera) + bitems = pkglist_combine(depverb) + else: + aitems = self.oldvalue.split() + bitems = self.newvalue.split() + removed = list(set(aitems) - set(bitems)) + added = list(set(bitems) - set(aitems)) + + if removed or added: + if removed and not bitems: + out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed)) + else: + out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '') + else: + out = '%s changed order' % self.fieldname + elif self.fieldname in numeric_fields: + aval = int(self.oldvalue or 0) + bval = int(self.newvalue or 0) + if aval != 0: + percentchg = ((bval - aval) / float(aval)) * 100 + else: + percentchg = 100 + out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg) + elif self.fieldname in defaultval_map: + out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue) + if self.fieldname == 'PKG' and '[default]' in self.newvalue: + out += ' - may indicate debian renaming failure' + elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']: + if self.oldvalue and self.newvalue: + out = '%s changed:\n ' % self.fieldname + elif self.newvalue: + out = '%s added:\n ' % self.fieldname + 
elif self.oldvalue: + out = '%s cleared:\n ' % self.fieldname + alines = self.oldvalue.splitlines() + blines = self.newvalue.splitlines() + diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='') + out += '\n '.join(list(diff)[2:]) + out += '\n --' + elif self.fieldname in img_monitor_files or '/image-files/' in self.path: + fieldname = self.fieldname + if '/image-files/' in self.path: + fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname) + out = 'Changes to %s:\n ' % fieldname + else: + if outer: + prefix = 'Changes to %s ' % self.path + out = '(%s):\n ' % self.fieldname + if self.filechanges: + out += '\n '.join(['%s' % i for i in self.filechanges]) + else: + alines = self.oldvalue.splitlines() + blines = self.newvalue.splitlines() + diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='') + out += '\n '.join(list(diff)) + out += '\n --' + else: + out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue) + + if self.related: + for chg in self.related: + if not outer and chg.fieldname in ['PE', 'PV', 'PR']: + continue + for line in chg._str_internal(False).splitlines(): + out += '\n * %s' % line + + return '%s%s' % (prefix, out) + +class FileChange: + changetype_add = 'A' + changetype_remove = 'R' + changetype_type = 'T' + changetype_perms = 'P' + changetype_ownergroup = 'O' + changetype_link = 'L' + + def __init__(self, path, changetype, oldvalue = None, newvalue = None): + self.path = path + self.changetype = changetype + self.oldvalue = oldvalue + self.newvalue = newvalue + + def _ftype_str(self, ftype): + if ftype == '-': + return 'file' + elif ftype == 'd': + return 'directory' + elif ftype == 'l': + return 'symlink' + elif ftype == 'c': + return 'char device' + elif ftype == 'b': + return 'block device' + elif ftype == 'p': + return 'fifo' + elif ftype == 's': + return 'socket' + else: + return 'unknown (%s)' % ftype + + def __str__(self): + if 
self.changetype == self.changetype_add: + return '%s was added' % self.path + elif self.changetype == self.changetype_remove: + return '%s was removed' % self.path + elif self.changetype == self.changetype_type: + return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue)) + elif self.changetype == self.changetype_perms: + return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue) + elif self.changetype == self.changetype_ownergroup: + return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue) + elif self.changetype == self.changetype_link: + return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue) + else: + return '%s changed (unknown)' % self.path + + +def blob_to_dict(blob): + alines = blob.data_stream.read().splitlines() + adict = {} + for line in alines: + splitv = [i.strip() for i in line.split('=',1)] + if len(splitv) > 1: + adict[splitv[0]] = splitv[1] + return adict + + +def file_list_to_dict(lines): + adict = {} + for line in lines: + # Leave the last few fields intact so we handle file names containing spaces + splitv = line.split(None,4) + # Grab the path and remove the leading . 
+ path = splitv[4][1:].strip() + # Handle symlinks + if(' -> ' in path): + target = path.split(' -> ')[1] + path = path.split(' -> ')[0] + adict[path] = splitv[0:3] + [target] + else: + adict[path] = splitv[0:3] + return adict + + +def compare_file_lists(alines, blines): + adict = file_list_to_dict(alines) + bdict = file_list_to_dict(blines) + filechanges = [] + for path, splitv in adict.iteritems(): + newsplitv = bdict.pop(path, None) + if newsplitv: + # Check type + oldvalue = splitv[0][0] + newvalue = newsplitv[0][0] + if oldvalue != newvalue: + filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue)) + # Check permissions + oldvalue = splitv[0][1:] + newvalue = newsplitv[0][1:] + if oldvalue != newvalue: + filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue)) + # Check owner/group + oldvalue = '%s/%s' % (splitv[1], splitv[2]) + newvalue = '%s/%s' % (newsplitv[1], newsplitv[2]) + if oldvalue != newvalue: + filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue)) + # Check symlink target + if newsplitv[0][0] == 'l': + if len(splitv) > 3: + oldvalue = splitv[3] + else: + oldvalue = None + newvalue = newsplitv[3] + if oldvalue != newvalue: + filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue)) + else: + filechanges.append(FileChange(path, FileChange.changetype_remove)) + + # Whatever is left over has been added + for path in bdict: + filechanges.append(FileChange(path, FileChange.changetype_add)) + + return filechanges + + +def compare_lists(alines, blines): + removed = list(set(alines) - set(blines)) + added = list(set(blines) - set(alines)) + + filechanges = [] + for pkg in removed: + filechanges.append(FileChange(pkg, FileChange.changetype_remove)) + for pkg in added: + filechanges.append(FileChange(pkg, FileChange.changetype_add)) + + return filechanges + + +def compare_pkg_lists(astr, bstr): + depvera = 
bb.utils.explode_dep_versions2(astr) + depverb = bb.utils.explode_dep_versions2(bstr) + + # Strip out changes where the version has increased + remove = [] + for k in depvera: + if k in depverb: + dva = depvera[k] + dvb = depverb[k] + if dva and dvb and len(dva) == len(dvb): + # Since length is the same, sort so that prefixes (e.g. >=) will line up + dva.sort() + dvb.sort() + removeit = True + for dvai, dvbi in zip(dva, dvb): + if dvai != dvbi: + aiprefix = dvai.split(' ')[0] + biprefix = dvbi.split(' ')[0] + if aiprefix == biprefix and aiprefix in ['>=', '=']: + if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0: + removeit = False + break + else: + removeit = False + break + if removeit: + remove.append(k) + + for k in remove: + depvera.pop(k) + depverb.pop(k) + + return (depvera, depverb) + + +def compare_dict_blobs(path, ablob, bblob, report_all, report_ver): + adict = blob_to_dict(ablob) + bdict = blob_to_dict(bblob) + + pkgname = os.path.basename(path) + + defaultvals = {} + defaultvals['PKG'] = pkgname + defaultvals['PKGE'] = '0' + + changes = [] + keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys())) + for key in keys: + astr = adict.get(key, '') + bstr = bdict.get(key, '') + if key in ver_monitor_fields: + monitored = report_ver or astr or bstr + else: + monitored = key in monitor_fields + mapped_key = defaultval_map.get(key, '') + if mapped_key: + if not astr: + astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, '')) + if not bstr: + bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, '')) + + if astr != bstr: + if (not report_all) and key in numeric_fields: + aval = int(astr or 0) + bval = int(bstr or 0) + if aval != 0: + percentchg = ((bval - aval) / float(aval)) * 100 + else: + percentchg = 100 + if abs(percentchg) < monitor_numeric_threshold: + continue + elif (not report_all) and key in list_fields: + if key == "FILELIST" and path.endswith("-dbg") and 
bstr.strip() != '': + continue + if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']: + (depvera, depverb) = compare_pkg_lists(astr, bstr) + if depvera == depverb: + continue + alist = astr.split() + alist.sort() + blist = bstr.split() + blist.sort() + # We don't care about the removal of self-dependencies + if pkgname in alist and not pkgname in blist: + alist.remove(pkgname) + if ' '.join(alist) == ' '.join(blist): + continue + + chg = ChangeRecord(path, key, astr, bstr, monitored) + changes.append(chg) + return changes + + +def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False): + repo = git.Repo(repopath) + assert repo.bare == False + commit = repo.commit(revision1) + diff = commit.diff(revision2) + + changes = [] + for d in diff.iter_change_type('M'): + path = os.path.dirname(d.a_blob.path) + if path.startswith('packages/'): + filename = os.path.basename(d.a_blob.path) + if filename == 'latest': + changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver)) + elif filename.startswith('latest.'): + chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True) + changes.append(chg) + elif path.startswith('images/'): + filename = os.path.basename(d.a_blob.path) + if filename in img_monitor_files: + if filename == 'files-in-image.txt': + alines = d.a_blob.data_stream.read().splitlines() + blines = d.b_blob.data_stream.read().splitlines() + filechanges = compare_file_lists(alines,blines) + if filechanges: + chg = ChangeRecord(path, filename, None, None, True) + chg.filechanges = filechanges + changes.append(chg) + elif filename == 'installed-package-names.txt': + alines = d.a_blob.data_stream.read().splitlines() + blines = d.b_blob.data_stream.read().splitlines() + filechanges = compare_lists(alines,blines) + if filechanges: + chg = ChangeRecord(path, filename, None, None, True) + chg.filechanges = filechanges + 
changes.append(chg) + else: + chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True) + changes.append(chg) + elif filename == 'image-info.txt': + changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver)) + elif '/image-files/' in path: + chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True) + changes.append(chg) + + # Look for added preinst/postinst/prerm/postrm + # (without reporting newly added recipes) + addedpkgs = [] + addedchanges = [] + for d in diff.iter_change_type('A'): + path = os.path.dirname(d.b_blob.path) + if path.startswith('packages/'): + filename = os.path.basename(d.b_blob.path) + if filename == 'latest': + addedpkgs.append(path) + elif filename.startswith('latest.'): + chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True) + addedchanges.append(chg) + for chg in addedchanges: + found = False + for pkg in addedpkgs: + if chg.path.startswith(pkg): + found = True + break + if not found: + changes.append(chg) + + # Look for cleared preinst/postinst/prerm/postrm + for d in diff.iter_change_type('D'): + path = os.path.dirname(d.a_blob.path) + if path.startswith('packages/'): + filename = os.path.basename(d.a_blob.path) + if filename != 'latest' and filename.startswith('latest.'): + chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True) + changes.append(chg) + + # Link related changes + for chg in changes: + if chg.monitored: + for chg2 in changes: + # (Check dirname in the case of fields from recipe info files) + if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path: + if chg2.fieldname in related_fields.get(chg.fieldname, []): + chg.related.append(chg2) + elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']: + chg.related.append(chg2) + + if report_all: + return changes + else: + return [chg for chg in changes if chg.monitored] 
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py new file mode 100644 index 0000000000..0840cc4c3f --- /dev/null +++ b/meta/lib/oe/cachedpath.py @@ -0,0 +1,233 @@ +# +# Based on standard python library functions but avoid +# repeated stat calls. Its assumed the files will not change from under us +# so we can cache stat calls. +# + +import os +import errno +import stat as statmod + +class CachedPath(object): + def __init__(self): + self.statcache = {} + self.lstatcache = {} + self.normpathcache = {} + return + + def updatecache(self, x): + x = self.normpath(x) + if x in self.statcache: + del self.statcache[x] + if x in self.lstatcache: + del self.lstatcache[x] + + def normpath(self, path): + if path in self.normpathcache: + return self.normpathcache[path] + newpath = os.path.normpath(path) + self.normpathcache[path] = newpath + return newpath + + def _callstat(self, path): + if path in self.statcache: + return self.statcache[path] + try: + st = os.stat(path) + self.statcache[path] = st + return st + except os.error: + self.statcache[path] = False + return False + + # We might as well call lstat and then only + # call stat as well in the symbolic link case + # since this turns out to be much more optimal + # in real world usage of this cache + def callstat(self, path): + path = self.normpath(path) + self.calllstat(path) + return self.statcache[path] + + def calllstat(self, path): + path = self.normpath(path) + if path in self.lstatcache: + return self.lstatcache[path] + #bb.error("LStatpath:" + path) + try: + lst = os.lstat(path) + self.lstatcache[path] = lst + if not statmod.S_ISLNK(lst.st_mode): + self.statcache[path] = lst + else: + self._callstat(path) + return lst + except (os.error, AttributeError): + self.lstatcache[path] = False + self.statcache[path] = False + return False + + # This follows symbolic links, so both islink() and isdir() can be true + # for the same path ono systems that support symlinks + def isfile(self, path): + """Test 
whether a path is a regular file""" + st = self.callstat(path) + if not st: + return False + return statmod.S_ISREG(st.st_mode) + + # Is a path a directory? + # This follows symbolic links, so both islink() and isdir() + # can be true for the same path on systems that support symlinks + def isdir(self, s): + """Return true if the pathname refers to an existing directory.""" + st = self.callstat(s) + if not st: + return False + return statmod.S_ISDIR(st.st_mode) + + def islink(self, path): + """Test whether a path is a symbolic link""" + st = self.calllstat(path) + if not st: + return False + return statmod.S_ISLNK(st.st_mode) + + # Does a path exist? + # This is false for dangling symbolic links on systems that support them. + def exists(self, path): + """Test whether a path exists. Returns False for broken symbolic links""" + if self.callstat(path): + return True + return False + + def lexists(self, path): + """Test whether a path exists. Returns True for broken symbolic links""" + if self.calllstat(path): + return True + return False + + def stat(self, path): + return self.callstat(path) + + def lstat(self, path): + return self.calllstat(path) + + def walk(self, top, topdown=True, onerror=None, followlinks=False): + # Matches os.walk, not os.path.walk() + + # We may not have read permission for top, in which case we can't + # get a list of the files the directory contains. os.path.walk + # always suppressed the exception then, rather than blow up for a + # minor reason when (say) a thousand readable directories are still + # left to visit. That logic is copied here. 
+ try: + names = os.listdir(top) + except os.error as err: + if onerror is not None: + onerror(err) + return + + dirs, nondirs = [], [] + for name in names: + if self.isdir(os.path.join(top, name)): + dirs.append(name) + else: + nondirs.append(name) + + if topdown: + yield top, dirs, nondirs + for name in dirs: + new_path = os.path.join(top, name) + if followlinks or not self.islink(new_path): + for x in self.walk(new_path, topdown, onerror, followlinks): + yield x + if not topdown: + yield top, dirs, nondirs + + ## realpath() related functions + def __is_path_below(self, file, root): + return (file + os.path.sep).startswith(root) + + def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir): + """Calculates real path of symlink 'start' + 'rel_path' below + 'root'; no part of 'start' below 'root' must contain symlinks. """ + have_dir = True + + for d in rel_path.split(os.path.sep): + if not have_dir and not assume_dir: + raise OSError(errno.ENOENT, "no such directory %s" % start) + + if d == os.path.pardir: # '..' + if len(start) >= len(root): + # do not follow '..' before root + start = os.path.dirname(start) + else: + # emit warning? 
+ pass + else: + (start, have_dir) = self.__realpath(os.path.join(start, d), + root, loop_cnt, assume_dir) + + assert(self.__is_path_below(start, root)) + + return start + + def __realpath(self, file, root, loop_cnt, assume_dir): + while self.islink(file) and len(file) >= len(root): + if loop_cnt == 0: + raise OSError(errno.ELOOP, file) + + loop_cnt -= 1 + target = os.path.normpath(os.readlink(file)) + + if not os.path.isabs(target): + tdir = os.path.dirname(file) + assert(self.__is_path_below(tdir, root)) + else: + tdir = root + + file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir) + + try: + is_dir = self.isdir(file) + except: + is_dir = False + + return (file, is_dir) + + def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False): + """ Returns the canonical path of 'file' with assuming a + toplevel 'root' directory. When 'use_physdir' is set, all + preceding path components of 'file' will be resolved first; + this flag should be set unless it is guaranteed that there is + no symlink in the path. 
When 'assume_dir' is not set, missing + path components will raise an ENOENT error""" + + root = os.path.normpath(root) + file = os.path.normpath(file) + + if not root.endswith(os.path.sep): + # letting root end with '/' makes some things easier + root = root + os.path.sep + + if not self.__is_path_below(file, root): + raise OSError(errno.EINVAL, "file '%s' is not below root" % file) + + try: + if use_physdir: + file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir) + else: + file = self.__realpath(file, root, loop_cnt, assume_dir)[0] + except OSError as e: + if e.errno == errno.ELOOP: + # make ELOOP more readable; without catching it, there will + # be printed a backtrace with 100s of OSError exceptions + # else + raise OSError(errno.ELOOP, + "too much recursions while resolving '%s'; loop in '%s'" % + (file, e.strerror)) + + raise + + return file diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py new file mode 100644 index 0000000000..8da87b771a --- /dev/null +++ b/meta/lib/oe/classextend.py @@ -0,0 +1,118 @@ +class ClassExtender(object): + def __init__(self, extname, d): + self.extname = extname + self.d = d + self.pkgs_mapping = [] + + def extend_name(self, name): + if name.startswith("kernel-") or name == "virtual/kernel": + return name + if name.startswith("rtld"): + return name + if name.endswith("-crosssdk"): + return name + if name.endswith("-" + self.extname): + name = name.replace("-" + self.extname, "") + if name.startswith("virtual/"): + subs = name.split("/", 1)[1] + if not subs.startswith(self.extname): + return "virtual/" + self.extname + "-" + subs + return name + if not name.startswith(self.extname): + return self.extname + "-" + name + return name + + def map_variable(self, varname, setvar = True): + var = self.d.getVar(varname, True) + if not var: + return "" + var = var.split() + newvar = [] + for v in var: + newvar.append(self.extend_name(v)) + newdata = " ".join(newvar) + if setvar: + 
self.d.setVar(varname, newdata) + return newdata + + def map_regexp_variable(self, varname, setvar = True): + var = self.d.getVar(varname, True) + if not var: + return "" + var = var.split() + newvar = [] + for v in var: + if v.startswith("^" + self.extname): + newvar.append(v) + elif v.startswith("^"): + newvar.append("^" + self.extname + "-" + v[1:]) + else: + newvar.append(self.extend_name(v)) + newdata = " ".join(newvar) + if setvar: + self.d.setVar(varname, newdata) + return newdata + + def map_depends(self, dep): + if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep): + return dep + else: + # Do not extend for that already have multilib prefix + var = self.d.getVar("MULTILIB_VARIANTS", True) + if var: + var = var.split() + for v in var: + if dep.startswith(v): + return dep + return self.extend_name(dep) + + def map_depends_variable(self, varname, suffix = ""): + # We need to preserve EXTENDPKGV so it can be expanded correctly later + if suffix: + varname = varname + "_" + suffix + orig = self.d.getVar("EXTENDPKGV", False) + self.d.setVar("EXTENDPKGV", "EXTENDPKGV") + deps = self.d.getVar(varname, True) + if not deps: + self.d.setVar("EXTENDPKGV", orig) + return + deps = bb.utils.explode_dep_versions2(deps) + newdeps = {} + for dep in deps: + newdeps[self.map_depends(dep)] = deps[dep] + + self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")) + self.d.setVar("EXTENDPKGV", orig) + + def map_packagevars(self): + for pkg in (self.d.getVar("PACKAGES", True).split() + [""]): + self.map_depends_variable("RDEPENDS", pkg) + self.map_depends_variable("RRECOMMENDS", pkg) + self.map_depends_variable("RSUGGESTS", pkg) + self.map_depends_variable("RPROVIDES", pkg) + self.map_depends_variable("RREPLACES", pkg) + self.map_depends_variable("RCONFLICTS", pkg) + self.map_depends_variable("PKG", pkg) + + def rename_packages(self): + for pkg in 
class NativesdkClassExtender(ClassExtender):
    """ClassExtender specialisation for the nativesdk- class extension.

    Differs from the base class only in how individual dependencies are
    mapped: toolchain components are redirected to their -crosssdk
    counterparts instead of being renamed.
    """
    def map_depends(self, dep):
        # Toolchain pieces are provided by the crosssdk variants.
        if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
            return "%s-crosssdk" % dep
        # Native/cross/SDK dependencies keep their names.
        if (dep.endswith(("-native", "-native-runtime")) or 'nativesdk-' in dep
                or '-cross-' in dep or '-crosssdk-' in dep):
            return dep
        return self.extend_name(dep)
def typed_value(key, d):
    """Construct a value for the metadata variable 'key', using the
    variable's flags to determine the type and the construction
    parameters (see oe.maketype.create).

    Aborts the build with a fatal error if construction fails.
    """
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is not None:
        # Expand each flag value before handing it to the type factory.
        # items()/'except ... as' replace the py2-only iteritems()/comma
        # syntax; behavior is identical on python 2.6+ and python 3.
        flags = dict((flag, d.expand(value))
                     for flag, value in flags.items())
    else:
        flags = {}

    try:
        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
    except (TypeError, ValueError) as exc:
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
def is_src_rpm(name):
    """Return True if 'name' looks like a source rpm file name."""
    return name.endswith(".src.rpm")

def package_name_from_srpm(srpm):
    """Strip version/release information from a src.rpm filename.

    The package name is the leading component plus every following
    '-'-separated component that does not start with a digit, e.g.
    'perl-File-Path-2.08-1.src.rpm' -> 'perl-File-Path'.
    """
    strings = srpm.split('-')
    # Keep the first component unconditionally, drop the trailing
    # 'release.src.rpm' component, and filter out numeric components.
    # (The original shadowed the 'str' builtin in its loop.)
    return '-'.join([strings[0]] +
                    [s for s in strings[1:-1] if not s[0].isdigit()])

def clean_package_list(package_list):
    """Remove duplicate entries from a package list and sort it."""
    # The original abused map() for its side effect on a dict (a silent
    # no-op on python 3, where map() is lazy) and, despite its
    # docstring, never sorted.  A sorted set does both correctly.
    return sorted(set(package_list))
def get_latest_released_opensuse_source_package_list():
    """Return (release, package_list) for the newest openSUSE release,
    combining the main and updates source repositories."""
    base = "http://download.opensuse.org"
    latest = find_latest_numeric_release(base + "/source/distribution/")

    names = get_source_package_list_from_url(
        "%s/source/distribution/%s/repo/oss/suse/src/" % (base, latest), "main")
    names += get_source_package_list_from_url(
        "%s/update/%s/rpm/src/" % (base, latest), "updates")

    return latest, clean_package_list(names)
def find_latest_debian_release(url):
    """Return the version of the newest Debian release listed at 'url',
    or '_NotFound_' if no release directory could be parsed."""
    releases = []
    for link in get_links_from_url(url):
        # release directories look like 'DebianX.Y'; links containing
        # ';' are sort-order query links, not releases
        if link.startswith("Debian") and ';' not in link:
            releases.append(link)
    releases.sort()
    try:
        # lexicographically largest entry, 'Debian' prefix dropped
        return releases.pop()[6:]
    except IndexError:
        # empty listing; the original's bare 'except:' hid real errors
        return "_NotFound_"
def find_latest_ubuntu_release(url):
    """Return the code name of the newest Ubuntu release listed at 'url',
    or '_NotFound_' when no '<release>-updates' directory is present."""
    # sort the server listing by last-modified, newest first
    listing_url = url + "?C=M;O=D"
    suffix = "-updates"
    for link in get_links_from_url(listing_url):
        if link.endswith(suffix):
            return link[:-len(suffix)]
    return "_NotFound_"
def update_distro_data(distro_check_dir, datetime):
    """
    If the cached distro package list data is older than the current
    build datetime, rebuild it.  The check-and-rebuild is protected by
    an exclusive lock on the datetime stamp file so that only one
    process performs it at a time.
    """
    if not os.path.isdir (distro_check_dir):
        try:
            bb.note ("Making new directory: %s" % distro_check_dir)
            os.makedirs (distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))

    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    f = None
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail

        f = open(datetime_file, "r+b")
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        # compare only the date part (YYYYMMDD) of the stamps
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir)
            f.seek(0)
            f.write(datetime)
            # drop leftover bytes if the previous stamp was longer
            f.truncate()

    except OSError:
        raise Exception('Unable to read/write this file: %s' % (datetime_file))
    finally:
        # 'f' may never have been opened if open() itself failed; the
        # original unconditionally referenced it here, raising NameError
        # and masking the real exception
        if f is not None:
            fcntl.lockf(f, fcntl.LOCK_UN)
            f.close()
passed") + + localdata = bb.data.createCopy(d) + pkglst_dir = os.path.join(distro_check_dir, "package_lists") + matching_distros = [] + pn = d.getVar('PN', True) + recipe_name = d.getVar('PN', True) + bb.note("Checking: %s" % pn) + + trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"}) + + if pn.find("-native") != -1: + pnstripped = pn.split("-native") + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + recipe_name = pnstripped[0] + + if pn.startswith("nativesdk-"): + pnstripped = pn.split("nativesdk-") + localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + recipe_name = pnstripped[1] + + if pn.find("-cross") != -1: + pnstripped = pn.split("-cross") + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + recipe_name = pnstripped[0] + + if pn.find("-initial") != -1: + pnstripped = pn.split("-initial") + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + recipe_name = pnstripped[0] + + bb.note("Recipe: %s" % recipe_name) + tmp = localdata.getVar('DISTRO_PN_ALIAS', True) + + distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'}) + + if tmp: + list = tmp.split(' ') + for str in list: + if str and str.find("=") == -1 and distro_exceptions[str]: + matching_distros.append(str) + + distro_pn_aliases = {} + if tmp: + list = tmp.split(' ') + for str in list: + if str.find("=") != -1: + (dist, pn_alias) = str.split('=') + distro_pn_aliases[dist.strip().lower()] = pn_alias.strip() + + for file in os.listdir(pkglst_dir): + (distro, distro_release) = file.split("-") + f = open(os.path.join(pkglst_dir, file), "rb") + for line in f: + (pkg, 
def save_distro_check_result(result, datetime, result_file, d):
    """Append one CSV line 'PN,match1,match2,...' for this recipe to
    'result_file', holding an exclusive lock while writing so parallel
    tasks do not interleave their lines.
    """
    import fcntl
    pn = d.getVar('PN', True)
    logdir = d.getVar('LOG_DIR', True)
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    if not os.path.isdir(logdir):
        os.makedirs(logdir)

    # join() instead of the original quadratic string-concat loop
    line = ",".join([pn] + list(result))
    f = open(result_file, "a")
    try:
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.seek(0, os.SEEK_END) # seek to the end of file
        f.write(line + "\n")
    finally:
        # unlock/close even if the write fails; the original leaked the
        # lock and the descriptor on any exception
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()
% + (type, create_img_cmd)) + + try: + subprocess.check_output(create_img_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + return("Error: The image creation script '%s' returned %d:\n%s" % + (e.cmd, e.returncode, e.output)) + + return None + + +""" +This class will help compute IMAGE_FSTYPE dependencies and group them in batches +that can be executed in parallel. + +The next example is for illustration purposes, highly unlikely to happen in real life. +It's just one of the test cases I used to test the algorithm: + +For: +IMAGE_FSTYPES = "i1 i2 i3 i4 i5" +IMAGE_TYPEDEP_i4 = "i2" +IMAGE_TYPEDEP_i5 = "i6 i4" +IMAGE_TYPEDEP_i6 = "i7" +IMAGE_TYPEDEP_i7 = "i2" + +We get the following list of batches that can be executed in parallel, having the +dependencies satisfied: + +[['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']] +""" +class ImageDepGraph(object): + def __init__(self, d): + self.d = d + self.graph = dict() + self.deps_array = dict() + + def _construct_dep_graph(self, image_fstypes): + graph = dict() + + def add_node(node): + deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "") + if deps != "": + graph[node] = deps + + for dep in deps.split(): + if not dep in graph: + add_node(dep) + else: + graph[node] = "" + + for fstype in image_fstypes: + add_node(fstype) + + return graph + + def _clean_graph(self): + # Live and VMDK images will be processed via inheriting + # bbclass and does not get processed here. Remove them from the fstypes + # graph. Their dependencies are already added, so no worries here. 
+ remove_list = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split() + + for item in remove_list: + self.graph.pop(item, None) + + def _compute_dependencies(self): + """ + returns dict object of nodes with [no_of_depends_on, no_of_depended_by] + for each node + """ + deps_array = dict() + for node in self.graph: + deps_array[node] = [0, 0] + + for node in self.graph: + deps = self.graph[node].split() + deps_array[node][0] += len(deps) + for dep in deps: + deps_array[dep][1] += 1 + + return deps_array + + def _sort_graph(self): + sorted_list = [] + group = [] + for node in self.graph: + if node not in self.deps_array: + continue + + depends_on = self.deps_array[node][0] + + if depends_on == 0: + group.append(node) + + if len(group) == 0 and len(self.deps_array) != 0: + bb.fatal("possible fstype circular dependency...") + + sorted_list.append(group) + + # remove added nodes from deps_array + for item in group: + for node in self.graph: + if item in self.graph[node].split(): + self.deps_array[node][0] -= 1 + + self.deps_array.pop(item, None) + + if len(self.deps_array): + # recursive call, to find the next group + sorted_list += self._sort_graph() + + return sorted_list + + def group_fstypes(self, image_fstypes): + self.graph = self._construct_dep_graph(image_fstypes) + + self._clean_graph() + + self.deps_array = self._compute_dependencies() + + alltypes = [node for node in self.graph] + + return (alltypes, self._sort_graph()) + + +class Image(ImageDepGraph): + def __init__(self, d): + self.d = d + + super(Image, self).__init__(d) + + def _get_rootfs_size(self): + """compute the rootfs size""" + rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True)) + overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True)) + rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True)) + rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True)) + rootfs_maxsize = self.d.getVar('IMAGE_ROOTFS_MAXSIZE', True) + + output = 
subprocess.check_output(['du', '-ks', + self.d.getVar('IMAGE_ROOTFS', True)]) + size_kb = int(output.split()[0]) + base_size = size_kb * overhead_factor + base_size = (base_size, rootfs_req_size)[base_size < rootfs_req_size] + \ + rootfs_extra_space + + if base_size != int(base_size): + base_size = int(base_size + 1) + + base_size += rootfs_alignment - 1 + base_size -= base_size % rootfs_alignment + + # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set) + if rootfs_maxsize: + rootfs_maxsize_int = int(rootfs_maxsize) + if base_size > rootfs_maxsize_int: + bb.fatal("The rootfs size %d(K) overrides the max size %d(K)" % \ + (base_size, rootfs_maxsize_int)) + + return base_size + + def _create_symlinks(self, subimages): + """create symlinks to the newly created image""" + deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True) + img_name = self.d.getVar('IMAGE_NAME', True) + link_name = self.d.getVar('IMAGE_LINK_NAME', True) + manifest_name = self.d.getVar('IMAGE_MANIFEST', True) + + os.chdir(deploy_dir) + + if link_name is not None: + for type in subimages: + if os.path.exists(img_name + ".rootfs." + type): + dst = link_name + "." + type + src = img_name + ".rootfs." 
+ type + bb.note("Creating symlink: %s -> %s" % (dst, src)) + os.symlink(src, dst) + + if manifest_name is not None and \ + os.path.exists(manifest_name) and \ + not os.path.exists(link_name + ".manifest"): + os.symlink(os.path.basename(manifest_name), + link_name + ".manifest") + + def _remove_old_symlinks(self): + """remove the symlinks to old binaries""" + + if self.d.getVar('IMAGE_LINK_NAME', True): + deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True) + for img in os.listdir(deploy_dir): + if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0: + img = os.path.join(deploy_dir, img) + if os.path.islink(img): + if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \ + os.path.exists(os.path.realpath(img)): + os.remove(os.path.realpath(img)) + + os.remove(img) + + """ + This function will just filter out the compressed image types from the + fstype groups returning a (filtered_fstype_groups, cimages) tuple. + """ + def _filter_out_commpressed(self, fstype_groups): + ctypes = self.d.getVar('COMPRESSIONTYPES', True).split() + cimages = {} + + filtered_groups = [] + for group in fstype_groups: + filtered_group = [] + for type in group: + basetype = None + for ctype in ctypes: + if type.endswith("." + ctype): + basetype = type[:-len("." 
+ ctype)] + if basetype not in filtered_group: + filtered_group.append(basetype) + if basetype not in cimages: + cimages[basetype] = [] + if ctype not in cimages[basetype]: + cimages[basetype].append(ctype) + break + if not basetype and type not in filtered_group: + filtered_group.append(type) + + filtered_groups.append(filtered_group) + + return (filtered_groups, cimages) + + def _get_image_types(self): + """returns a (types, cimages) tuple""" + + alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split()) + + filtered_groups, cimages = self._filter_out_commpressed(fstype_groups) + + return (alltypes, filtered_groups, cimages) + + def _write_script(self, type, cmds): + tempdir = self.d.getVar('T', True) + script_name = os.path.join(tempdir, "create_image." + type) + + self.d.setVar('img_creation_func', '\n'.join(cmds)) + self.d.setVarFlag('img_creation_func', 'func', 1) + self.d.setVarFlag('img_creation_func', 'fakeroot', 1) + + with open(script_name, "w+") as script: + script.write("%s" % bb.build.shell_trap_code()) + script.write("export ROOTFS_SIZE=%d\n" % self._get_rootfs_size()) + bb.data.emit_func('img_creation_func', script, self.d) + script.write("img_creation_func\n") + + os.chmod(script_name, 0775) + + return script_name + + def _get_imagecmds(self): + old_overrides = self.d.getVar('OVERRIDES', 0) + + alltypes, fstype_groups, cimages = self._get_image_types() + + image_cmd_groups = [] + + bb.note("The image creation groups are: %s" % str(fstype_groups)) + for fstype_group in fstype_groups: + image_cmds = [] + for type in fstype_group: + cmds = [] + subimages = [] + + localdata = bb.data.createCopy(self.d) + localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides)) + bb.data.update_data(localdata) + localdata.setVar('type', type) + + cmds.append("\t" + localdata.getVar("IMAGE_CMD", True)) + cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}")) + + if type in cimages: + for ctype in cimages[type]: + cmds.append("\t" 
+ localdata.getVar("COMPRESS_CMD_" + ctype, True)) + subimages.append(type + "." + ctype) + + if type not in alltypes: + cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}")) + else: + subimages.append(type) + + script_name = self._write_script(type, cmds) + + image_cmds.append((type, subimages, script_name)) + + image_cmd_groups.append(image_cmds) + + return image_cmd_groups + + def create(self): + bb.note("###### Generate images #######") + pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True) + post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True) + + execute_pre_post_process(self.d, pre_process_cmds) + + self._remove_old_symlinks() + + image_cmd_groups = self._get_imagecmds() + + for image_cmds in image_cmd_groups: + # create the images in parallel + nproc = multiprocessing.cpu_count() + pool = bb.utils.multiprocessingpool(nproc) + results = list(pool.imap(generate_image, image_cmds)) + pool.close() + pool.join() + + for result in results: + if result is not None: + bb.fatal(result) + + for image_type, subimages, script in image_cmds: + bb.note("Creating symlinks for %s image ..." % image_type) + self._create_symlinks(subimages) + + execute_pre_post_process(self.d, post_process_cmds) + + +def create_image(d): + Image(d).create() + +if __name__ == "__main__": + """ + Image creation can be called independent from bitbake environment. 
# vi:sts=4:sw=4:et
"""Code for parsing OpenEmbedded license strings"""

import ast
import re
from fnmatch import fnmatchcase as fnmatch

class LicenseError(Exception):
    """Base class for all license parsing errors."""
    pass

class LicenseSyntaxError(LicenseError):
    """The license string could not be parsed as an expression."""
    def __init__(self, licensestr, exc):
        self.licensestr = licensestr
        self.exc = exc
        LicenseError.__init__(self)

    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)

class InvalidLicense(LicenseError):
    """A license name contains characters outside the allowed set."""
    def __init__(self, license):
        self.license = license
        LicenseError.__init__(self)

    def __str__(self):
        return "invalid characters in license '%s'" % self.license

license_operator = re.compile('([&|() ])')
# raw string: '\-' is not a valid python escape outside a raw literal
license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')

class LicenseVisitor(ast.NodeVisitor):
    """Syntax tree visitor which can accept OpenEmbedded license strings"""
    def visit_string(self, licensestr):
        """Quote each license name, insert an implicit '&' between two
        adjacent names, then parse and visit the resulting expression.
        Raises InvalidLicense on any unrecognized token."""
        new_elements = []
        # list() so elements can be indexed below: python2's filter()
        # returned a list, python3's returns a lazy iterator
        elements = list(filter(lambda x: x.strip(), license_operator.split(licensestr)))
        for pos, element in enumerate(elements):
            if license_pattern.match(element):
                # two adjacent license names mean AND
                if pos > 0 and license_pattern.match(elements[pos-1]):
                    new_elements.append('&')
                element = '"' + element + '"'
            elif not license_operator.match(element):
                raise InvalidLicense(element)
            new_elements.append(element)

        self.visit(ast.parse(' '.join(new_elements)))

class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by selecting one of each
    set of OR options, in the way the user specifies"""
    def __init__(self, choose_licenses):
        self.choose_licenses = choose_licenses
        self.licenses = []
        LicenseVisitor.__init__(self)

    def visit_Str(self, node):
        # python < 3.8 parses string literals as ast.Str
        self.licenses.append(node.s)

    def visit_Constant(self, node):
        # python >= 3.8 parses string literals as ast.Constant; without
        # this the visitor silently collected no licenses there
        self.licenses.append(node.value)

    def visit_BinOp(self, node):
        if isinstance(node.op, ast.BitOr):
            # flatten both sides of the OR and let the caller's policy
            # pick which alternative to keep
            left = FlattenVisitor(self.choose_licenses)
            left.visit(node.left)

            right = FlattenVisitor(self.choose_licenses)
            right.visit(node.right)

            selected = self.choose_licenses(left.licenses, right.licenses)
            self.licenses.extend(selected)
        else:
            self.generic_visit(node)

def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    flatten = FlattenVisitor(choose_licenses)
    try:
        flatten.visit_string(licensestr)
    except SyntaxError as exc:
        raise LicenseSyntaxError(licensestr, exc)
    return flatten.licenses

def is_included(licensestr, whitelist=None, blacklist=None):
    """Given a license string and whitelist and blacklist, determine if the
    license string matches the whitelist and does not match the blacklist.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses which were excluded (or the included licenses, if the state
    is True).
    """

    def include_license(license):
        return any(fnmatch(license, pattern) for pattern in whitelist)

    def exclude_license(license):
        return any(fnmatch(license, pattern) for pattern in blacklist)

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses)."""
        # sum() instead of len(filter(...)); python3's filter object has
        # no len()
        alpha_weight = sum(1 for lic in alpha if include_license(lic))
        beta_weight = sum(1 for lic in beta if include_license(lic))
        if alpha_weight > beta_weight:
            return alpha
        else:
            return beta

    if not whitelist:
        whitelist = ['*']

    if not blacklist:
        blacklist = []

    licenses = flattened_licenses(licensestr, choose_licenses)
    # materialize as lists: on python3 a filter object is always truthy,
    # which made the 'if excluded:' test below unconditionally true
    excluded = [lic for lic in licenses if exclude_license(lic)]
    included = [lic for lic in licenses if include_license(lic)]
    if excluded:
        return False, excluded
    else:
        return True, included
def release_dict_file():
    """ Try to gather LSB release information manually when lsb_release tool is unavailable """
    # NOTE(review): relies on a module-level 'import os' that is not visible
    # in this chunk -- confirm lsb.py imports os at the top of the file.
    # Returns a dict with DISTRIB_ID / DISTRIB_RELEASE keys (lsb-release
    # files may contribute additional keys), or None if no known release
    # file exists or reading one fails.
    data = None
    try:
        if os.path.exists('/etc/lsb-release'):
            # Debian/Ubuntu style: KEY=value lines.
            data = {}
            with open('/etc/lsb-release') as f:
                for line in f:
                    key, value = line.split("=", 1)
                    data[key] = value.strip()
        elif os.path.exists('/etc/redhat-release'):
            # Fedora/RHEL/CentOS: one "<name> release <version> (<codename>)" line.
            data = {}
            with open('/etc/redhat-release') as f:
                distro = f.readline().strip()
                import re
                match = re.match(r'(.*) release (.*) \((.*)\)', distro)
                if match:
                    data['DISTRIB_ID'] = match.group(1)
                    data['DISTRIB_RELEASE'] = match.group(2)
        elif os.path.exists('/etc/SuSE-release'):
            # SuSE: distro id is fixed; release parsed from "VERSION = " line.
            data = {}
            data['DISTRIB_ID'] = 'SUSE LINUX'
            with open('/etc/SuSE-release') as f:
                for line in f:
                    if line.startswith('VERSION = '):
                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
                        break
        elif os.path.exists('/etc/os-release'):
            # systemd os-release: NAME= and VERSION_ID=, values may be quoted.
            data = {}
            with open('/etc/os-release') as f:
                for line in f:
                    if line.startswith('NAME='):
                        data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
                    if line.startswith('VERSION_ID='):
                        data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
    except IOError:
        return None
    return data
class MissingFlag(TypeError):
    """Raised when a flag required to construct a type was not supplied."""

    def __init__(self, flag, type):
        super(MissingFlag, self).__init__()
        self.flag = flag
        self.type = type

    def __str__(self):
        return "Type '%s' requires flag '%s'" % (self.type, self.flag)
def get_callable_args(obj):
    """Grab all but the first argument of the specified callable, returning
    the list, as well as a set of which of the arguments have default
    values.

    For a class, the arguments of its constructor are inspected (minus
    'self')."""
    if type(obj) is type:
        obj = obj.__init__

    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() when it exists.  Only the positional argument names
    # (index 0) and their defaults (index 3) are needed, and both functions
    # report those identically.
    if hasattr(inspect, 'getfullargspec'):
        spec = inspect.getfullargspec(obj)
    else:
        spec = inspect.getargspec(obj)
    args, defaults = spec[0], spec[3]

    flaglist = []
    if args:
        if len(args) > 1 and args[0] == 'self':
            args = args[1:]
        flaglist.extend(args)

    optional = set()
    if defaults:
        # The trailing len(defaults) arguments are the ones with defaults.
        optional |= set(flaglist[-len(defaults):])
    return flaglist, optional
# Register every public callable exposed by the stdlib 'types' module as an
# available variable type.
for type_name in dir(types):
    candidate = getattr(types, type_name)
    if type_name.startswith('_') or not callable(candidate):
        continue
    register(type_name, candidate)
manifest_type=MANIFEST_TYPE_IMAGE): + self.d = d + self.manifest_type = manifest_type + + if manifest_dir is None: + if manifest_type != self.MANIFEST_TYPE_IMAGE: + self.manifest_dir = self.d.getVar('SDK_DIR', True) + else: + self.manifest_dir = self.d.getVar('WORKDIR', True) + else: + self.manifest_dir = manifest_dir + + bb.utils.mkdirhier(self.manifest_dir) + + self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type) + self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type) + self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type) + + # packages in the following vars will be split in 'must install' and + # 'multilib' + self.vars_to_split = ["PACKAGE_INSTALL", + "TOOLCHAIN_HOST_TASK", + "TOOLCHAIN_TARGET_TASK"] + + """ + This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk). + This will be used for testing until the class is implemented properly! + """ + def _create_dummy_initial(self): + image_rootfs = self.d.getVar('IMAGE_ROOTFS', True) + pkg_list = dict() + if image_rootfs.find("core-image-sato-sdk") > 0: + pkg_list[self.PKG_TYPE_MUST_INSTALL] = \ + "packagegroup-core-x11-sato-games packagegroup-base-extended " \ + "packagegroup-core-x11-sato packagegroup-core-x11-base " \ + "packagegroup-core-sdk packagegroup-core-tools-debug " \ + "packagegroup-core-boot packagegroup-core-tools-testapps " \ + "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \ + "apt packagegroup-core-tools-profile psplash " \ + "packagegroup-core-standalone-sdk-target " \ + "packagegroup-core-ssh-openssh dpkg kernel-dev" + pkg_list[self.PKG_TYPE_LANGUAGE] = \ + "locale-base-en-us locale-base-en-gb" + elif image_rootfs.find("core-image-sato") > 0: + pkg_list[self.PKG_TYPE_MUST_INSTALL] = \ + "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \ + "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \ + 
"packagegroup-core-x11-sato packagegroup-core-boot" + pkg_list['lgp'] = \ + "locale-base-en-us locale-base-en-gb" + elif image_rootfs.find("core-image-minimal") > 0: + pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot" + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for pkg_type in pkg_list: + for pkg in pkg_list[pkg_type].split(): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + """ + This will create the initial manifest which will be used by Rootfs class to + generate the rootfs + """ + @abstractmethod + def create_initial(self): + pass + + """ + This creates the manifest after everything has been installed. + """ + @abstractmethod + def create_final(self): + pass + + """ + This creates the manifest after the package in initial manifest has been + dummy installed. It lists all *to be installed* packages. There is no real + installation, just a test. + """ + @abstractmethod + def create_full(self, pm): + pass + + """ + The following function parses an initial manifest and returns a dictionary + object with the must install, attempt only, multilib and language packages. + """ + def parse_initial_manifest(self): + pkgs = dict() + + with open(self.initial_manifest) as manifest: + for line in manifest.read().split('\n'): + comment = re.match("^#.*", line) + pattern = "^(%s|%s|%s|%s),(.*)$" % \ + (self.PKG_TYPE_MUST_INSTALL, + self.PKG_TYPE_ATTEMPT_ONLY, + self.PKG_TYPE_MULTILIB, + self.PKG_TYPE_LANGUAGE) + pkg = re.match(pattern, line) + + if comment is not None: + continue + + if pkg is not None: + pkg_type = pkg.group(1) + pkg_name = pkg.group(2) + + if not pkg_type in pkgs: + pkgs[pkg_type] = [pkg_name] + else: + pkgs[pkg_type].append(pkg_name) + + return pkgs + + ''' + This following function parses a full manifest and return a list + object with packages. 
class RpmManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split a space-separated package list into a dict keyed by package
        type -- must-install ('mip') vs. multilib ('mlp') -- each value being
        a space-separated package string."""
        pkgs = dict()

        # MULTILIB_VARIANTS is loop-invariant; fetch it once instead of
        # once per package as before.
        ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()

        for pkg in pkg_list.split():
            pkg_type = self.PKG_TYPE_MUST_INSTALL

            for ml_variant in ml_variants:
                if pkg.startswith(ml_variant + '-'):
                    pkg_type = self.PKG_TYPE_MULTILIB

            if pkg_type not in pkgs:
                pkgs[pkg_type] = pkg
            else:
                pkgs[pkg_type] += " " + pkg

        return pkgs

    def create_initial(self):
        """Write the initial manifest: one '<type>,<pkg>' line per package,
        taken from the variables mapped for this manifest type."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
                    if split_pkgs is not None:
                        # Merge with update(): the previous
                        # dict(pkgs.items() + split_pkgs.items()) relied on
                        # Python 2 list-returning items() and raises
                        # TypeError on Python 3.
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var, True)
                    if pkg_list is not None:
                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list

            for pkg_type in pkgs:
                for pkg in pkgs[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        # No-op for the rpm backend.
        pass

    def create_full(self, pm):
        # No-op for the rpm backend.
        pass
+ """ + def _split_multilib(self, pkg_list): + pkgs = dict() + + for pkg in pkg_list.split(): + pkg_type = self.PKG_TYPE_MUST_INSTALL + + ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split() + + for ml_variant in ml_variants: + if pkg.startswith(ml_variant + '-'): + pkg_type = self.PKG_TYPE_MULTILIB + + if not pkg_type in pkgs: + pkgs[pkg_type] = pkg + else: + pkgs[pkg_type] += " " + pkg + + return pkgs + + def create_initial(self): + pkgs = dict() + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + if var in self.vars_to_split: + split_pkgs = self._split_multilib(self.d.getVar(var, True)) + if split_pkgs is not None: + pkgs = dict(pkgs.items() + split_pkgs.items()) + else: + pkg_list = self.d.getVar(var, True) + if pkg_list is not None: + pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True) + + for pkg_type in pkgs: + for pkg in pkgs[pkg_type].split(): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + if not os.path.exists(self.initial_manifest): + self.create_initial() + + initial_manifest = self.parse_initial_manifest() + pkgs_to_install = list() + for pkg_type in initial_manifest: + pkgs_to_install += initial_manifest[pkg_type] + if len(pkgs_to_install) == 0: + return + + output = pm.dummy_install(pkgs_to_install) + + with open(self.full_manifest, 'w+') as manifest: + pkg_re = re.compile('^Installing ([^ ]+) [^ ].*') + for line in set(output.split('\n')): + m = pkg_re.match(line) + if m: + manifest.write(m.group(1) + '\n') + + return + + +class DpkgManifest(Manifest): + def create_initial(self): + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + pkg_list = self.d.getVar(var, True) + + if pkg_list is None: + continue + + for pkg in pkg_list.split(): + 
def create_manifest(d, final_manifest=False, manifest_dir=None,
                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
    """Create the initial (or, when final_manifest is set, the final)
    package manifest for the image/SDK described by the datastore 'd'.

    The backend class is selected by IMAGE_PKGTYPE (rpm/ipk/deb).
    """
    backends = {'rpm': RpmManifest,
                'ipk': OpkgManifest,
                'deb': DpkgManifest}

    backend_cls = backends[d.getVar('IMAGE_PKGTYPE', True)]
    instance = backend_cls(d, manifest_dir, manifest_type)

    if final_manifest:
        instance.create_final()
    else:
        instance.create_initial()
def file_translate(file):
    """Encode characters that are unsafe in dependency names by replacing
    each with an @word@ token.  '@' is translated first so that the '@'
    characters introduced by later tokens are left untouched."""
    translations = (("@", "@at@"),
                    (" ", "@space@"),
                    ("\t", "@tab@"),
                    ("[", "@openbrace@"),
                    ("]", "@closebrace@"),
                    ("_", "@underscore@"))
    ft = file
    for char, token in translations:
        ft = ft.replace(char, token)
    return ft
def create_index(arg):
    """Run one index-creation shell command (dispatched in parallel by the
    package-manager backends via oe.utils.multiprocess_exec).

    Returns None on success, or an error string describing the failure."""
    index_cmd = arg

    bb.note("Executing '%s' ..." % index_cmd)
    try:
        result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        return ("Index creation command '%s' failed with return code %d:\n%s" %
                (e.cmd, e.returncode, e.output))

    # Any output from the indexer is informational only.
    if result:
        bb.note(result)

    return None
    def write_index(self):
        """Create/update createrepo indexes for every architecture directory
        present under deploy_dir, running the createrepo commands in
        parallel.  Fatal if any index command fails."""
        # '-' is replaced with '_' to match the on-disk rpm deploy
        # directory naming.
        sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()

        mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]

        archs = set()
        for item in mlb_prefix_list:
            archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))

        # Fall back to the full multilib arch set if the prefix list
        # contributed nothing.
        if len(archs) == 0:
            archs = archs.union(set(all_mlb_pkg_archs))

        archs = archs.union(set(sdk_pkg_archs))

        rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
        index_cmds = []
        rpm_dirs_found = False
        for arch in archs:
            # Only index arch directories that actually exist.
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir))

            rpm_dirs_found = True

        if not rpm_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        # Run all createrepo invocations in parallel; create_index() returns
        # an error string per failed command, which is fatal here.
        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
    def write_index(self):
        """Generate apt Packages/Packages.gz/Release index files for every
        architecture directory present under deploy_dir, running the
        apt-ftparchive pipelines in parallel.  Fatal on any failure."""
        pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
        # NOTE(review): if PACKAGE_ARCHS is unset, arch_list is never bound
        # and the loop below raises NameError -- presumably PACKAGE_ARCHS is
        # always set in practice; confirm.
        if pkg_archs is not None:
            arch_list = pkg_archs.split()
        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
        if sdk_pkg_archs is not None:
            for a in sdk_pkg_archs.split():
                if a not in pkg_archs:
                    arch_list.append(a)

        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)

        apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
        gzip = bb.utils.which(os.getenv('PATH'), "gzip")

        index_cmds = []
        deb_dirs_found = False
        for arch in arch_list:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            # One shell pipeline per arch: generate Packages, compress it,
            # then append checksum data to the Release file written below.
            # PSEUDO_UNLOAD keeps pseudo from intercepting apt-ftparchive.
            cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)

            cmd += "%s -fc Packages > Packages.gz;" % gzip

            with open(os.path.join(arch_dir, "Release"), "w+") as release:
                release.write("Label: %s\n" % arch)

            cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive

            index_cmds.append(cmd)

            deb_dirs_found = True

        if not deb_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
+ filename = os.path.join(self.d.getVar('PKGDATA_DIR', True), + 'runtime-reverse', + new_pkg) + if os.path.exists(filename): + found = 1 + break + + if found == 1 and fixed_arch == fixed_cmp_arch: + break + #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch)) + return new_pkg, new_arch + + def _list_pkg_deps(self): + cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"), + "-t", self.image_rpmlib] + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip() + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the package dependencies. Command '%s' " + "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output)) + + return output + + def list(self, format=None): + if format == "deps": + if self.rpm_version == 4: + bb.fatal("'deps' format dependency listings are not supported with rpm 4 since rpmresolve does not work") + return self._list_pkg_deps() + + cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir + cmd += ' -D "_dbpath /var/lib/rpm" -qa' + if self.rpm_version == 4: + cmd += " --qf '[%{NAME} %{ARCH} %{VERSION}\n]'" + else: + cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'" + + try: + # bb.note(cmd) + tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() + + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the installed packages list. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + output = list() + for line in tmp_output.split('\n'): + if len(line.strip()) == 0: + continue + pkg = line.split()[0] + arch = line.split()[1] + ver = line.split()[2] + if self.rpm_version == 4: + pkgorigin = "unknown" + else: + pkgorigin = line.split()[3] + new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch) + + if format == "arch": + output.append('%s %s' % (new_pkg, new_arch)) + elif format == "file": + output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch)) + elif format == "ver": + output.append('%s %s %s' % (new_pkg, new_arch, ver)) + else: + output.append('%s' % (new_pkg)) + + output.sort() + + return '\n'.join(output) + + +class OpkgPkgsList(PkgsList): + def __init__(self, d, rootfs_dir, config_file): + super(OpkgPkgsList, self).__init__(d, rootfs_dir) + + self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl") + self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir) + self.opkg_args += self.d.getVar("OPKG_ARGS", True) + + def list(self, format=None): + opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py") + + if format == "arch": + cmd = "%s %s status | %s -a" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "file": + cmd = "%s %s status | %s -f" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "ver": + cmd = "%s %s status | %s -v" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + elif format == "deps": + cmd = "%s %s status | %s" % \ + (self.opkg_cmd, self.opkg_args, opkg_query_cmd) + else: + cmd = "%s %s list_installed | cut -d' ' -f1" % \ + (self.opkg_cmd, self.opkg_args) + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the installed packages list. 
    def list(self, format=None):
        """List the packages installed in the rootfs via dpkg-query.

        format selects the output fields: 'arch' (name + arch), 'file'
        (name + .deb path + arch), 'ver' (name + arch + version), 'deps'
        (dependency listing post-processed by opkg-query-helper.py), or
        None for bare package names.  Fatal if dpkg-query fails.
        """
        cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
               "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
               "-W"]

        # Pick the dpkg-query --showformat matching the requested output.
        if format == "arch":
            cmd.append("-f=${Package} ${PackageArch}\n")
        elif format == "file":
            cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
        elif format == "ver":
            cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
        elif format == "deps":
            cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
        else:
            cmd.append("-f=${Package}\n")

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))

        if format == "file":
            # Rewrite each .deb file name to its full deploy path when the
            # file actually exists on disk.
            tmp_output = ""
            for line in tuple(output.split('\n')):
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
                else:
                    tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)

            output = tmp_output
        elif format == "deps":
            # Feed the raw listing through opkg-query-helper.py via a
            # temporary file.
            # NOTE(review): NamedTemporaryFile().write(output) requires str
            # on Python 2 / bytes on Python 3 -- this code path assumes
            # Python 2 semantics; confirm before porting.
            opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
            file_out = tempfile.NamedTemporaryFile()
            file_out.write(output)
            file_out.flush()

            try:
                output = subprocess.check_output("cat %s | %s" %
                                                 (file_out.name, opkg_query_cmd),
                                                 stderr=subprocess.STDOUT,
                                                 shell=True)
            except subprocess.CalledProcessError as e:
                file_out.close()
                bb.fatal("Cannot compute packages dependencies. Command '%s' "
                         "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

            file_out.close()

        return output
+ """ + @abstractmethod + def remove(self, pkgs, with_dependencies=True): + pass + + """ + This function creates the index files + """ + @abstractmethod + def write_index(self): + pass + + @abstractmethod + def remove_packaging_data(self): + pass + + @abstractmethod + def list_installed(self, format=None): + pass + + @abstractmethod + def insert_feeds_uris(self): + pass + + """ + Install complementary packages based upon the list of currently installed + packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install + these packages, if they don't exist then no error will occur. Note: every + backend needs to call this function explicitly after the normal package + installation + """ + def install_complementary(self, globs=None): + # we need to write the list of installed packages to a file because the + # oe-pkgdata-util reads it from a file + installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True), + "installed_pkgs.txt") + with open(installed_pkgs_file, "w+") as installed_pkgs: + installed_pkgs.write(self.list_installed("arch")) + + if globs is None: + globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True) + split_linguas = set() + + for translation in self.d.getVar('IMAGE_LINGUAS', True).split(): + split_linguas.add(translation) + split_linguas.add(translation.split('-')[0]) + + split_linguas = sorted(split_linguas) + + for lang in split_linguas: + globs += " *-locale-%s" % lang + + if globs is None: + return + + cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"), + "glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file, + globs] + exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True) + if exclude: + cmd.extend(['-x', exclude]) + try: + bb.note("Installing complementary packages ...") + complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Could not compute complementary packages list. 
Command " + "'%s' returned %d:\n%s" % + (' '.join(cmd), e.returncode, e.output)) + + self.install(complementary_pkgs.split(), attempt_only=True) + + def deploy_dir_lock(self): + if self.deploy_dir is None: + raise RuntimeError("deploy_dir is not set!") + + lock_file_name = os.path.join(self.deploy_dir, "deploy.lock") + + self.deploy_lock = bb.utils.lockfile(lock_file_name) + + def deploy_dir_unlock(self): + if self.deploy_lock is None: + return + + bb.utils.unlockfile(self.deploy_lock) + + self.deploy_lock = None + + +class RpmPM(PackageManager): + def __init__(self, + d, + target_rootfs, + target_vendor, + task_name='target', + providename=None, + arch_var=None, + os_var=None): + super(RpmPM, self).__init__(d) + self.target_rootfs = target_rootfs + self.target_vendor = target_vendor + self.task_name = task_name + self.providename = providename + self.fullpkglist = list() + self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True) + self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm") + self.install_dir = os.path.join(self.target_rootfs, "install") + self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm") + self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart") + self.smart_opt = "--quiet --data-dir=" + os.path.join(target_rootfs, + 'var/lib/smart') + self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper') + self.solution_manifest = self.d.expand('${T}/saved/%s_solution' % + self.task_name) + self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name) + self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm') + + if not os.path.exists(self.d.expand('${T}/saved')): + bb.utils.mkdirhier(self.d.expand('${T}/saved')) + + self.indexer = RpmIndexer(self.d, self.deploy_dir) + self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var) + self.rpm_version = self.pkgs_list.rpm_version + + self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var) + + def insert_feeds_uris(self): 
+ if self.feed_uris == "": + return + + # List must be prefered to least preferred order + default_platform_extra = set() + platform_extra = set() + bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" + for mlib in self.ml_os_list: + for arch in self.ml_prefix_list[mlib]: + plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] + if mlib == bbextendvariant: + default_platform_extra.add(plt) + else: + platform_extra.add(plt) + + platform_extra = platform_extra.union(default_platform_extra) + + arch_list = [] + for canonical_arch in platform_extra: + arch = canonical_arch.split('-')[0] + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + + uri_iterator = 0 + channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list) + + for uri in self.feed_uris.split(): + for arch in arch_list: + bb.note('Note: adding Smart channel url%d%s (%s)' % + (uri_iterator, arch, channel_priority)) + self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y' + % (uri_iterator, arch, uri, arch)) + self._invoke_smart('channel --set url%d-%s priority=%d' % + (uri_iterator, arch, channel_priority)) + channel_priority -= 5 + uri_iterator += 1 + + ''' + Create configs for rpm and smart, and multilib is supported + ''' + def create_configs(self): + target_arch = self.d.getVar('TARGET_ARCH', True) + platform = '%s%s-%s' % (target_arch.replace('-', '_'), + self.target_vendor, + self.ml_os_list['default']) + + # List must be prefered to least preferred order + default_platform_extra = list() + platform_extra = list() + bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" + for mlib in self.ml_os_list: + for arch in self.ml_prefix_list[mlib]: + plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] + if mlib == bbextendvariant: + if plt not in default_platform_extra: + default_platform_extra.append(plt) + else: + if plt not in platform_extra: + platform_extra.append(plt) + platform_extra = 
default_platform_extra + platform_extra + + self._create_configs(platform, platform_extra) + + def _invoke_smart(self, args): + cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args) + # bb.note(cmd) + try: + complementary_pkgs = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + shell=True) + # bb.note(complementary_pkgs) + return complementary_pkgs + except subprocess.CalledProcessError as e: + bb.fatal("Could not invoke smart. Command " + "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output)) + + def _search_pkg_name_in_feeds(self, pkg, feed_archs): + for arch in feed_archs: + arch = arch.replace('-', '_') + for p in self.fullpkglist: + regex_match = r"^%s-[^-]*-[^-]*@%s$" % \ + (re.escape(pkg), re.escape(arch)) + if re.match(regex_match, p) is not None: + # First found is best match + # bb.note('%s -> %s' % (pkg, pkg + '@' + arch)) + return pkg + '@' + arch + + return "" + + ''' + Translate the OE multilib format names to the RPM/Smart format names + It searched the RPM/Smart format names in probable multilib feeds first, + and then searched the default base feed. + ''' + def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False): + new_pkgs = list() + + for pkg in pkgs: + new_pkg = pkg + # Search new_pkg in probable multilibs first + for mlib in self.ml_prefix_list: + # Jump the default archs + if mlib == 'default': + continue + + subst = pkg.replace(mlib + '-', '') + # if the pkg in this multilib feed + if subst != pkg: + feed_archs = self.ml_prefix_list[mlib] + new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs) + if not new_pkg: + # Failed to translate, package not found! + err_msg = '%s not found in the %s feeds (%s).\n' % \ + (pkg, mlib, " ".join(feed_archs)) + if not attempt_only: + err_msg += " ".join(self.fullpkglist) + bb.fatal(err_msg) + bb.warn(err_msg) + else: + new_pkgs.append(new_pkg) + + break + + # Apparently not a multilib package... 
+ if pkg == new_pkg: + # Search new_pkg in default archs + default_archs = self.ml_prefix_list['default'] + new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs) + if not new_pkg: + err_msg = '%s not found in the base feeds (%s).\n' % \ + (pkg, ' '.join(default_archs)) + if not attempt_only: + err_msg += " ".join(self.fullpkglist) + bb.fatal(err_msg) + bb.warn(err_msg) + else: + new_pkgs.append(new_pkg) + + return new_pkgs + + def _create_configs(self, platform, platform_extra): + # Setup base system configuration + bb.note("configuring RPM platform settings") + + # Configure internal RPM environment when using Smart + os.environ['RPM_ETCRPM'] = self.etcrpm_dir + bb.utils.mkdirhier(self.etcrpm_dir) + + # Setup temporary directory -- install... + if os.path.exists(self.install_dir): + bb.utils.remove(self.install_dir, True) + bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp')) + + channel_priority = 5 + platform_dir = os.path.join(self.etcrpm_dir, "platform") + sdkos = self.d.getVar("SDK_OS", True) + with open(platform_dir, "w+") as platform_fd: + platform_fd.write(platform + '\n') + for pt in platform_extra: + channel_priority += 5 + if sdkos: + tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt) + tmp = re.sub("-linux.*$", "-linux.*\n", tmp) + platform_fd.write(tmp) + + # Tell RPM that the "/" directory exist and is available + bb.note("configuring RPM system provides") + sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo") + bb.utils.mkdirhier(sysinfo_dir) + with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames: + dirnames.write("/\n") + + if self.providename: + providename_dir = os.path.join(sysinfo_dir, "Providename") + if not os.path.exists(providename_dir): + providename_content = '\n'.join(self.providename) + providename_content += '\n' + open(providename_dir, "w+").write(providename_content) + + # Configure RPM... we enforce these settings! 
+ bb.note("configuring RPM DB settings") + # After change the __db.* cache size, log file will not be + # generated automatically, that will raise some warnings, + # so touch a bare log for rpm write into it. + if self.rpm_version == 5: + rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001') + if not os.path.exists(rpmlib_log): + bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log')) + open(rpmlib_log, 'w+').close() + + DB_CONFIG_CONTENT = "# ================ Environment\n" \ + "set_data_dir .\n" \ + "set_create_dir .\n" \ + "set_lg_dir ./log\n" \ + "set_tmp_dir ./tmp\n" \ + "set_flags db_log_autoremove on\n" \ + "\n" \ + "# -- thread_count must be >= 8\n" \ + "set_thread_count 64\n" \ + "\n" \ + "# ================ Logging\n" \ + "\n" \ + "# ================ Memory Pool\n" \ + "set_cachesize 0 1048576 0\n" \ + "set_mp_mmapsize 268435456\n" \ + "\n" \ + "# ================ Locking\n" \ + "set_lk_max_locks 16384\n" \ + "set_lk_max_lockers 16384\n" \ + "set_lk_max_objects 16384\n" \ + "mutex_set_max 163840\n" \ + "\n" \ + "# ================ Replication\n" + + db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG') + if not os.path.exists(db_config_dir): + open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT) + + # Create database so that smart doesn't complain (lazy init) + opt = "-qa" + if self.rpm_version == 4: + opt = "--initdb" + cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % ( + self.rpm_cmd, self.target_rootfs, opt) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Create rpm database failed. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + # Configure smart + bb.note("configuring Smart settings") + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), + True) + self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs) + self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm') + self._invoke_smart('config --set rpm-extra-macros._var=%s' % + self.d.getVar('localstatedir', True)) + cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp' + + prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True) + if prefer_color: + if prefer_color not in ['0', '1', '2', '4']: + bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n" + "\t1: ELF32 wins\n" + "\t2: ELF64 wins\n" + "\t4: ELF64 N32 wins (mips64 or mips64el only)" % + prefer_color) + if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \ + ['mips64', 'mips64el']: + bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el " + "only.") + self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s' + % prefer_color) + + self._invoke_smart(cmd) + + # Write common configuration for host and target usage + self._invoke_smart('config --set rpm-nolinktos=1') + self._invoke_smart('config --set rpm-noparentdirs=1') + check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True) + if check_signature and check_signature.strip() == "0": + self._invoke_smart('config --set rpm-check-signatures=false') + for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): + self._invoke_smart('flag --set ignore-recommends %s' % i) + + # Do the following configurations here, to avoid them being + # saved for field upgrade + if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1": + self._invoke_smart('config --set ignore-all-recommends=1') + pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or "" + for i in pkg_exclude.split(): + self._invoke_smart('flag --set exclude-packages %s' % i) + + # Optional debugging + # 
self._invoke_smart('config --set rpm-log-level=debug') + # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile' + # self._invoke_smart(cmd) + ch_already_added = [] + for canonical_arch in platform_extra: + arch = canonical_arch.split('-')[0] + arch_channel = os.path.join(self.deploy_dir, arch) + if os.path.exists(arch_channel) and not arch in ch_already_added: + bb.note('Note: adding Smart channel %s (%s)' % + (arch, channel_priority)) + self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y' + % (arch, arch_channel)) + self._invoke_smart('channel --set %s priority=%d' % + (arch, channel_priority)) + channel_priority -= 5 + + ch_already_added.append(arch) + + bb.note('adding Smart RPM DB channel') + self._invoke_smart('channel --add rpmsys type=rpm-sys -y') + + # Construct install scriptlet wrapper. + # Scripts need to be ordered when executed, this ensures numeric order. + # If we ever run into needing more the 899 scripts, we'll have to. + # change num to start with 1000. + # + if self.rpm_version == 4: + scriptletcmd = "$2 $3 $4\n" + else: + scriptletcmd = "$2 $1/$3 $4\n" + + SCRIPTLET_FORMAT = "#!/bin/bash\n" \ + "\n" \ + "export PATH=%s\n" \ + "export D=%s\n" \ + 'export OFFLINE_ROOT="$D"\n' \ + 'export IPKG_OFFLINE_ROOT="$D"\n' \ + 'export OPKG_OFFLINE_ROOT="$D"\n' \ + "export INTERCEPT_DIR=%s\n" \ + "export NATIVE_ROOT=%s\n" \ + "\n" \ + + scriptletcmd + \ + "if [ $? 
-ne 0 ]; then\n" \ + " if [ $4 -eq 1 ]; then\n" \ + " mkdir -p $1/etc/rpm-postinsts\n" \ + " num=100\n" \ + " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \ + " name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \ + ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \ + ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \ + " cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \ + " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \ + " else\n" \ + ' echo "Error: pre/post remove scriptlet failed"\n' \ + " fi\n" \ + "fi\n" + + intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts') + native_root = self.d.getVar('STAGING_DIR_NATIVE', True) + scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'], + self.target_rootfs, + intercept_dir, + native_root) + open(self.scriptlet_wrapper, 'w+').write(scriptlet_content) + + bb.note("Note: configuring RPM cross-install scriptlet_wrapper") + os.chmod(self.scriptlet_wrapper, 0755) + cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \ + self.scriptlet_wrapper + self._invoke_smart(cmd) + + # Debug to show smart config info + # bb.note(self._invoke_smart('config --show')) + + def update(self): + self._invoke_smart('update rpmsys') + + ''' + Install pkgs with smart, the pkg name is oe format + ''' + def install(self, pkgs, attempt_only=False): + + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + if attempt_only and len(pkgs) == 0: + return + pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only) + + if not attempt_only: + bb.note('to be installed: %s' % ' '.join(pkgs)) + cmd = "%s %s install -y %s" % \ + (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) + bb.note(cmd) + else: + bb.note('installing attempt only packages...') + bb.note('Attempting %s' % ' '.join(pkgs)) + cmd = "%s %s install --attempt -y %s" % \ + (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) + try: + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + 
bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to install packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + ''' + Remove pkgs with smart, the pkg name is smart/rpm format + ''' + def remove(self, pkgs, with_dependencies=True): + bb.note('to be removed: ' + ' '.join(pkgs)) + + if not with_dependencies: + cmd = "%s -e --nodeps " % self.rpm_cmd + cmd += "--root=%s " % self.target_rootfs + cmd += "--dbpath=/var/lib/rpm " + cmd += "--define='_cross_scriptlet_wrapper %s' " % \ + self.scriptlet_wrapper + cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs) + else: + # for pkg in pkgs: + # bb.note('Debug: What required: %s' % pkg) + # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg)) + + cmd = "%s %s remove -y %s" % (self.smart_cmd, + self.smart_opt, + ' '.join(pkgs)) + + try: + bb.note(cmd) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + bb.note(output) + except subprocess.CalledProcessError as e: + bb.note("Unable to remove packages. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + def upgrade(self): + bb.note('smart upgrade') + self._invoke_smart('upgrade') + + def write_index(self): + result = self.indexer.write_index() + + if result is not None: + bb.fatal(result) + + def remove_packaging_data(self): + bb.utils.remove(self.image_rpmlib, True) + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), + True) + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True) + + # remove temp directory + bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True) + + def backup_packaging_data(self): + # Save the rpmlib for increment rpm image generation + if os.path.exists(self.saved_rpmlib): + bb.utils.remove(self.saved_rpmlib, True) + shutil.copytree(self.image_rpmlib, + self.saved_rpmlib, + symlinks=True) + + def recovery_packaging_data(self): + # Move the rpmlib back + if os.path.exists(self.saved_rpmlib): + if os.path.exists(self.image_rpmlib): + bb.utils.remove(self.image_rpmlib, True) + + bb.note('Recovery packaging data') + shutil.copytree(self.saved_rpmlib, + self.image_rpmlib, + symlinks=True) + + def list_installed(self, format=None): + return self.pkgs_list.list(format) + + ''' + If incremental install, we need to determine what we've got, + what we need to add, and what to remove... + The dump_install_solution will dump and save the new install + solution. 
+ ''' + def dump_install_solution(self, pkgs): + bb.note('creating new install solution for incremental install') + if len(pkgs) == 0: + return + + pkgs = self._pkg_translate_oe_to_smart(pkgs, False) + install_pkgs = list() + + cmd = "%s %s install -y --dump %s 2>%s" % \ + (self.smart_cmd, + self.smart_opt, + ' '.join(pkgs), + self.solution_manifest) + try: + # Disable rpmsys channel for the fake install + self._invoke_smart('channel --disable rpmsys') + + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + with open(self.solution_manifest, 'r') as manifest: + for pkg in manifest.read().split('\n'): + if '@' in pkg: + install_pkgs.append(pkg) + except subprocess.CalledProcessError as e: + bb.note("Unable to dump install packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + # Recovery rpmsys channel + self._invoke_smart('channel --enable rpmsys') + return install_pkgs + + ''' + If incremental install, we need to determine what we've got, + what we need to add, and what to remove... 
+ The load_old_install_solution will load the previous install + solution + ''' + def load_old_install_solution(self): + bb.note('load old install solution for incremental install') + installed_pkgs = list() + if not os.path.exists(self.solution_manifest): + bb.note('old install solution not exist') + return installed_pkgs + + with open(self.solution_manifest, 'r') as manifest: + for pkg in manifest.read().split('\n'): + if '@' in pkg: + installed_pkgs.append(pkg.strip()) + + return installed_pkgs + + ''' + Dump all available packages in feeds, it should be invoked after the + newest rpm index was created + ''' + def dump_all_available_pkgs(self): + available_manifest = self.d.expand('${T}/saved/available_pkgs.txt') + available_pkgs = list() + cmd = "%s %s query --output %s" % \ + (self.smart_cmd, self.smart_opt, available_manifest) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + with open(available_manifest, 'r') as manifest: + for pkg in manifest.read().split('\n'): + if '@' in pkg: + available_pkgs.append(pkg.strip()) + except subprocess.CalledProcessError as e: + bb.note("Unable to list all available packages. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + self.fullpkglist = available_pkgs + + return + + def save_rpmpostinst(self, pkg): + mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split() + + new_pkg = pkg + # Remove any multilib prefix from the package name + for mlib in mlibs: + if mlib in pkg: + new_pkg = pkg.replace(mlib + '-', '') + break + + bb.note(' * postponing %s' % new_pkg) + saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg + + cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs + cmd += ' --dbpath=/var/lib/rpm ' + new_pkg + cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"' + cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"' + cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir + + try: + bb.note(cmd) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() + bb.note(output) + os.chmod(saved_dir, 0755) + except subprocess.CalledProcessError as e: + bb.fatal("Invoke save_rpmpostinst failed. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + '''Write common configuration for target usage''' + def rpm_setup_smart_target_config(self): + bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), + True) + + self._invoke_smart('config --set rpm-nolinktos=1') + self._invoke_smart('config --set rpm-noparentdirs=1') + for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): + self._invoke_smart('flag --set ignore-recommends %s' % i) + self._invoke_smart('channel --add rpmsys type=rpm-sys -y') + + ''' + The rpm db lock files were produced after invoking rpm to query on + build system, and they caused the rpm on target didn't work, so we + need to unlock the rpm db by removing the lock files. 
+ ''' + def unlock_rpm_db(self): + # Remove rpm db lock files + rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs) + for f in rpm_db_locks: + bb.utils.remove(f, True) + + +class OpkgPM(PackageManager): + def __init__(self, d, target_rootfs, config_file, archs, task_name='target'): + super(OpkgPM, self).__init__(d) + + self.target_rootfs = target_rootfs + self.config_file = config_file + self.pkg_archs = archs + self.task_name = task_name + + self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True) + self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock") + self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl") + self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs) + self.opkg_args += self.d.getVar("OPKG_ARGS", True) + + opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True) + if opkg_lib_dir[0] == "/": + opkg_lib_dir = opkg_lib_dir[1:] + + self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg") + + bb.utils.mkdirhier(self.opkg_dir) + + self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name) + if not os.path.exists(self.d.expand('${T}/saved')): + bb.utils.mkdirhier(self.d.expand('${T}/saved')) + + if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1": + self._create_config() + else: + self._create_custom_config() + + self.indexer = OpkgIndexer(self.d, self.deploy_dir) + + """ + This function will change a package's status in /var/lib/opkg/status file. 
+ If 'packages' is None then the new_status will be applied to all + packages + """ + def mark_packages(self, status_tag, packages=None): + status_file = os.path.join(self.opkg_dir, "status") + + with open(status_file, "r") as sf: + with open(status_file + ".tmp", "w+") as tmp_sf: + if packages is None: + tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", + r"Package: \1\n\2Status: \3%s" % status_tag, + sf.read())) + else: + if type(packages).__name__ != "list": + raise TypeError("'packages' should be a list object") + + status = sf.read() + for pkg in packages: + status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, + r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), + status) + + tmp_sf.write(status) + + os.rename(status_file + ".tmp", status_file) + + def _create_custom_config(self): + bb.note("Building from feeds activated!") + + with open(self.config_file, "w+") as config_file: + priority = 1 + for arch in self.pkg_archs.split(): + config_file.write("arch %s %d\n" % (arch, priority)) + priority += 5 + + for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split(): + feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line) + + if feed_match is not None: + feed_name = feed_match.group(1) + feed_uri = feed_match.group(2) + + bb.note("Add %s feed with URL %s" % (feed_name, feed_uri)) + + config_file.write("src/gz %s %s\n" % (feed_name, feed_uri)) + + """ + Allow to use package deploy directory contents as quick devel-testing + feed. This creates individual feed configs for each arch subdir of those + specified as compatible for the current machine. + NOTE: Development-helper feature, NOT a full-fledged feed. 
+ """ + if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "": + for arch in self.pkg_archs.split(): + cfg_file_name = os.path.join(self.target_rootfs, + self.d.getVar("sysconfdir", True), + "opkg", + "local-%s-feed.conf" % arch) + + with open(cfg_file_name, "w+") as cfg_file: + cfg_file.write("src/gz local-%s %s/%s" % + (arch, + self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True), + arch)) + + def _create_config(self): + with open(self.config_file, "w+") as config_file: + priority = 1 + for arch in self.pkg_archs.split(): + config_file.write("arch %s %d\n" % (arch, priority)) + priority += 5 + + config_file.write("src oe file:%s\n" % self.deploy_dir) + + for arch in self.pkg_archs.split(): + pkgs_dir = os.path.join(self.deploy_dir, arch) + if os.path.isdir(pkgs_dir): + config_file.write("src oe-%s file:%s\n" % + (arch, pkgs_dir)) + + def insert_feeds_uris(self): + if self.feed_uris == "": + return + + rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf' + % self.target_rootfs) + + with open(rootfs_config, "w+") as config_file: + uri_iterator = 0 + for uri in self.feed_uris.split(): + config_file.write("src/gz url-%d %s/ipk\n" % + (uri_iterator, uri)) + + for arch in self.pkg_archs.split(): + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + bb.note('Note: adding opkg channel url-%s-%d (%s)' % + (arch, uri_iterator, uri)) + + config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" % + (arch, uri_iterator, uri, arch)) + uri_iterator += 1 + + def update(self): + self.deploy_dir_lock() + + cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + self.deploy_dir_unlock() + bb.fatal("Unable to update the package index files. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + self.deploy_dir_unlock() + + def install(self, pkgs, attempt_only=False): + if attempt_only and len(pkgs) == 0: + return + + cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True) + + try: + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + bb.note(output) + except subprocess.CalledProcessError as e: + (bb.fatal, bb.note)[attempt_only]("Unable to install packages. " + "Command '%s' returned %d:\n%s" % + (cmd, e.returncode, e.output)) + + def remove(self, pkgs, with_dependencies=True): + if with_dependencies: + cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + else: + cmd = "%s %s --force-depends remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + + try: + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to remove packages. 
Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + bb.fatal(result) + + def remove_packaging_data(self): + bb.utils.remove(self.opkg_dir, True) + # create the directory back, it's needed by PM lock + bb.utils.mkdirhier(self.opkg_dir) + + def list_installed(self, format=None): + return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format) + + def handle_bad_recommendations(self): + bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or "" + if bad_recommendations.strip() == "": + return + + status_file = os.path.join(self.opkg_dir, "status") + + # If status file existed, it means the bad recommendations has already + # been handled + if os.path.exists(status_file): + return + + cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args) + + with open(status_file, "w+") as status: + for pkg in bad_recommendations.split(): + pkg_info = cmd + pkg + + try: + output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip() + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get package info. Command '%s' " + "returned %d:\n%s" % (pkg_info, e.returncode, e.output)) + + if output == "": + bb.note("Ignored bad recommendation: '%s' is " + "not a package" % pkg) + continue + + for line in output.split('\n'): + if line.startswith("Status:"): + status.write("Status: deinstall hold not-installed\n") + else: + status.write(line + "\n") + + # Append a blank line after each package entry to ensure that it + # is separated from the following entry + status.write("\n") + + ''' + The following function dummy installs pkgs and returns the log of output. 
+ ''' + def dummy_install(self, pkgs): + if len(pkgs) == 0: + return + + # Create an temp dir as opkg root for dummy installation + temp_rootfs = self.d.expand('${T}/opkg') + temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg') + bb.utils.mkdirhier(temp_opkg_dir) + + opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) + opkg_args += self.d.getVar("OPKG_ARGS", True) + + cmd = "%s %s update" % (self.opkg_cmd, opkg_args) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + # Dummy installation + cmd = "%s %s --noaction install %s " % (self.opkg_cmd, + opkg_args, + ' '.join(pkgs)) + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to dummy install packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + bb.utils.remove(temp_rootfs, True) + + return output + + def backup_packaging_data(self): + # Save the opkglib for increment ipk image generation + if os.path.exists(self.saved_opkg_dir): + bb.utils.remove(self.saved_opkg_dir, True) + shutil.copytree(self.opkg_dir, + self.saved_opkg_dir, + symlinks=True) + + def recover_packaging_data(self): + # Move the opkglib back + if os.path.exists(self.saved_opkg_dir): + if os.path.exists(self.opkg_dir): + bb.utils.remove(self.opkg_dir, True) + + bb.note('Recover packaging data') + shutil.copytree(self.saved_opkg_dir, + self.opkg_dir, + symlinks=True) + + +class DpkgPM(PackageManager): + def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None): + super(DpkgPM, self).__init__(d) + self.target_rootfs = target_rootfs + self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True) + if apt_conf_dir is None: + self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt") + else: + self.apt_conf_dir = apt_conf_dir + 
self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") + self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get") + + self.apt_args = d.getVar("APT_ARGS", True) + + self.all_arch_list = archs.split() + all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split() + self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list) + + self._create_configs(archs, base_archs) + + self.indexer = DpkgIndexer(self.d, self.deploy_dir) + + """ + This function will change a package's status in /var/lib/dpkg/status file. + If 'packages' is None then the new_status will be applied to all + packages + """ + def mark_packages(self, status_tag, packages=None): + status_file = self.target_rootfs + "/var/lib/dpkg/status" + + with open(status_file, "r") as sf: + with open(status_file + ".tmp", "w+") as tmp_sf: + if packages is None: + tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", + r"Package: \1\n\2Status: \3%s" % status_tag, + sf.read())) + else: + if type(packages).__name__ != "list": + raise TypeError("'packages' should be a list object") + + status = sf.read() + for pkg in packages: + status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, + r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), + status) + + tmp_sf.write(status) + + os.rename(status_file + ".tmp", status_file) + + """ + Run the pre/post installs for package "package_name". If package_name is + None, then run all pre/post install scriptlets. 
+ """ + def run_pre_post_installs(self, package_name=None): + info_dir = self.target_rootfs + "/var/lib/dpkg/info" + suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")] + status_file = self.target_rootfs + "/var/lib/dpkg/status" + installed_pkgs = [] + + with open(status_file, "r") as status: + for line in status.read().split('\n'): + m = re.match("^Package: (.*)", line) + if m is not None: + installed_pkgs.append(m.group(1)) + + if package_name is not None and not package_name in installed_pkgs: + return + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True) + + failed_pkgs = [] + for pkg_name in installed_pkgs: + for suffix in suffixes: + p_full = os.path.join(info_dir, pkg_name + suffix[0]) + if os.path.exists(p_full): + try: + bb.note("Executing %s for package: %s ..." % + (suffix[1].lower(), pkg_name)) + subprocess.check_output(p_full, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.note("%s for package %s failed with %d:\n%s" % + (suffix[1], pkg_name, e.returncode, e.output)) + failed_pkgs.append(pkg_name) + break + + if len(failed_pkgs): + self.mark_packages("unpacked", failed_pkgs) + + def update(self): + os.environ['APT_CONFIG'] = self.apt_conf_file + + self.deploy_dir_lock() + + cmd = "%s update" % self.apt_get_cmd + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update the package index files. 
Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + self.deploy_dir_unlock() + + def install(self, pkgs, attempt_only=False): + if attempt_only and len(pkgs) == 0: + return + + os.environ['APT_CONFIG'] = self.apt_conf_file + + cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \ + (self.apt_get_cmd, self.apt_args, ' '.join(pkgs)) + + try: + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + (bb.fatal, bb.note)[attempt_only]("Unable to install packages. " + "Command '%s' returned %d:\n%s" % + (cmd, e.returncode, e.output)) + + # rename *.dpkg-new files/dirs + for root, dirs, files in os.walk(self.target_rootfs): + for dir in dirs: + new_dir = re.sub("\.dpkg-new", "", dir) + if dir != new_dir: + os.rename(os.path.join(root, dir), + os.path.join(root, new_dir)) + + for file in files: + new_file = re.sub("\.dpkg-new", "", file) + if file != new_file: + os.rename(os.path.join(root, file), + os.path.join(root, new_file)) + + + def remove(self, pkgs, with_dependencies=True): + if with_dependencies: + os.environ['APT_CONFIG'] = self.apt_conf_file + cmd = "%s remove %s" % (self.apt_get_cmd, ' '.join(pkgs)) + else: + cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \ + " -r --force-depends %s" % \ + (bb.utils.which(os.getenv('PATH'), "dpkg"), + self.target_rootfs, self.target_rootfs, ' '.join(pkgs)) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to remove packages. 
Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + bb.fatal(result) + + def insert_feeds_uris(self): + if self.feed_uris == "": + return + + sources_conf = os.path.join("%s/etc/apt/sources.list" + % self.target_rootfs) + arch_list = [] + + for arch in self.all_arch_list: + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + + with open(sources_conf, "w+") as sources_file: + for uri in self.feed_uris.split(): + for arch in arch_list: + bb.note('Note: adding dpkg channel at (%s)' % uri) + sources_file.write("deb %s/deb/%s ./\n" % + (uri, arch)) + + def _create_configs(self, archs, base_archs): + base_archs = re.sub("_", "-", base_archs) + + if os.path.exists(self.apt_conf_dir): + bb.utils.remove(self.apt_conf_dir, True) + + bb.utils.mkdirhier(self.apt_conf_dir) + bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/") + bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/") + + arch_list = [] + for arch in self.all_arch_list: + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + + with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file: + priority = 801 + for arch in arch_list: + prefs_file.write( + "Package: *\n" + "Pin: release l=%s\n" + "Pin-Priority: %d\n\n" % (arch, priority)) + + priority += 5 + + pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or "" + for pkg in pkg_exclude.split(): + prefs_file.write( + "Package: %s\n" + "Pin: release *\n" + "Pin-Priority: -1\n\n" % pkg) + + arch_list.reverse() + + with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file: + for arch in arch_list: + sources_file.write("deb file:%s/ ./\n" % + os.path.join(self.deploy_dir, arch)) + + base_arch_list = base_archs.split() + multilib_variants = 
self.d.getVar("MULTILIB_VARIANTS", True); + for variant in multilib_variants.split(): + if variant == "lib32": + base_arch_list.append("i386") + elif variant == "lib64": + base_arch_list.append("amd64") + + with open(self.apt_conf_file, "w+") as apt_conf: + with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample: + for line in apt_conf_sample.read().split("\n"): + match_arch = re.match(" Architecture \".*\";$", line) + architectures = "" + if match_arch: + for base_arch in base_arch_list: + architectures += "\"%s\";" % base_arch + apt_conf.write(" Architectures {%s};\n" % architectures); + apt_conf.write(" Architecture \"%s\";\n" % base_archs) + else: + line = re.sub("#ROOTFS#", self.target_rootfs, line) + line = re.sub("#APTCONF#", self.apt_conf_dir, line) + apt_conf.write(line + "\n") + + target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs + bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info")) + + bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates")) + + if not os.path.exists(os.path.join(target_dpkg_dir, "status")): + open(os.path.join(target_dpkg_dir, "status"), "w+").close() + if not os.path.exists(os.path.join(target_dpkg_dir, "available")): + open(os.path.join(target_dpkg_dir, "available"), "w+").close() + + def remove_packaging_data(self): + bb.utils.remove(os.path.join(self.target_rootfs, + self.d.getVar('opkglibdir', True)), True) + bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True) + + def fix_broken_dependencies(self): + os.environ['APT_CONFIG'] = self.apt_conf_file + + cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Cannot fix broken dependencies. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output)) + + def list_installed(self, format=None): + return DpkgPkgsList(self.d, self.target_rootfs).list() + + +def generate_index_files(d): + classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split() + + indexer_map = { + "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)), + "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)), + "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True)) + } + + result = None + + for pkg_class in classes: + if not pkg_class in indexer_map: + continue + + if os.path.exists(indexer_map[pkg_class][1]): + result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index() + + if result is not None: + bb.fatal(result) + +if __name__ == "__main__": + """ + We should be able to run this as a standalone script, from outside bitbake + environment. + """ + """ + TBD + """ diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py new file mode 100644 index 0000000000..cd5f0445f5 --- /dev/null +++ b/meta/lib/oe/packagedata.py @@ -0,0 +1,94 @@ +import codecs + +def packaged(pkg, d): + return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK) + +def read_pkgdatafile(fn): + pkgdata = {} + + def decode(str): + c = codecs.getdecoder("string_escape") + return c(str)[0] + + if os.access(fn, os.R_OK): + import re + f = open(fn, 'r') + lines = f.readlines() + f.close() + r = re.compile("([^:]+):\s*(.*)") + for l in lines: + m = r.match(l) + if m: + pkgdata[m.group(1)] = decode(m.group(2)) + + return pkgdata + +def get_subpkgedata_fn(pkg, d): + return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg) + +def has_subpkgdata(pkg, d): + return os.access(get_subpkgedata_fn(pkg, d), os.R_OK) + +def read_subpkgdata(pkg, d): + return read_pkgdatafile(get_subpkgedata_fn(pkg, d)) + +def has_pkgdata(pn, d): + fn = d.expand('${PKGDATA_DIR}/%s' % pn) + return os.access(fn, os.R_OK) + +def read_pkgdata(pn, d): + fn = d.expand('${PKGDATA_DIR}/%s' % pn) + return 
#
# Collapse FOO_pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
    """Return pkg's pkgdata with the '_<pkg>' suffix stripped from keys."""
    ret = {}
    subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
    for var in subd:
        newvar = var.replace("_" + pkg, "")
        # prefer the suffixed variant when both spellings are present
        if newvar == var and var + "_" + pkg in subd:
            continue
        ret[newvar] = subd[var]
    return ret

def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""

    pkgdatadir = d.getVar("PKGDATA_DIR", True)

    pkgmap = {}
    try:
        files = os.listdir(pkgdatadir)
    except OSError:
        bb.warn("No files in %s?" % pkgdatadir)
        files = []

    for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
        try:
            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
        except (IOError, OSError):
            # was: OSError only -- open() raises IOError on Python 2
            continue

        packages = pkgdata.get("PACKAGES") or ""
        for pkg in packages.split():
            pkgmap[pkg] = pn

    return pkgmap

def pkgmap(d):
    """Return a dictionary mapping package to recipe name.
    Cache the mapping in the metadata"""

    pkgmap_data = d.getVar("__pkgmap_data", False)
    if pkgmap_data is None:
        pkgmap_data = _pkgmap(d)
        d.setVar("__pkgmap_data", pkgmap_data)

    return pkgmap_data

def recipename(pkg, d):
    """Return the recipe name for the given binary package name."""

    return pkgmap(d).get(pkg)
import itertools

def is_optional(feature, d):
    """True if the feature's package list carries the 'optional' flag
    (new-style FEATURE_PACKAGES_* or legacy PACKAGE_GROUP_*)."""
    packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
    if packages:
        return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
    else:
        return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))

def packages(features, d):
    """Yield every package belonging to the given features."""
    for feature in features:
        packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
        if not packages:
            packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
        for pkg in (packages or "").split():
            yield pkg

def required_packages(features, d):
    """Packages of the non-optional features."""
    req = filter(lambda feature: not is_optional(feature, d), features)
    return packages(req, d)

def optional_packages(features, d):
    """Packages of the optional features."""
    opt = filter(lambda feature: is_optional(feature, d), features)
    return packages(opt, d)

def active_packages(features, d):
    """Required then optional packages, as one iterator."""
    return itertools.chain(required_packages(features, d),
                           optional_packages(features, d))

def active_recipes(features, d):
    """Yield the recipe name of each active package that has one."""
    import oe.packagedata

    for pkg in active_packages(features, d):
        recipe = oe.packagedata.recipename(pkg, d)
        if recipe:
            yield recipe
import oe.path

class NotFoundError(bb.BBHandledException):
    """Raised when a path handed to runcmd() does not exist."""
    def __init__(self, path):
        self.path = path

    def __str__(self):
        return "Error: %s not found." % self.path

class CmdError(bb.BBHandledException):
    """Raised when a command run by runcmd() exits non-zero."""
    def __init__(self, exitstatus, output):
        self.status = exitstatus
        self.output = output

    def __str__(self):
        return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output)


def runcmd(args, dir=None):
    """Quote and run 'args' as a shell command (optionally after chdir to
    'dir'); return its output or raise CmdError on failure."""
    import pipes

    if dir:
        olddir = os.path.abspath(os.curdir)
        if not os.path.exists(dir):
            raise NotFoundError(dir)
        os.chdir(dir)

    try:
        cmd = " ".join(pipes.quote(str(arg)) for arg in args)
        (exitstatus, output) = oe.utils.getstatusoutput(cmd)
        if exitstatus != 0:
            # getstatusoutput() yields a raw wait status; >> 8 extracts
            # the exit code
            raise CmdError(exitstatus >> 8, output)
        return output

    finally:
        if dir:
            os.chdir(olddir)

class PatchError(Exception):
    """Generic patch handling failure."""
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return "Patch Error: %s" % self.msg

class PatchSet(object):
    """Abstract base for an ordered set of patches applied to self.dir."""
    defaults = {
        "strippath": 1
    }

    def __init__(self, dir, d):
        self.dir = dir
        self.d = d
        self.patches = []
        # index of the last applied patch, or None if none applied
        self._current = None

    def current(self):
        return self._current

    def Clean(self):
        """
        Clean out the patch set.  Generally includes unapplying all
        patches and wiping out all associated metadata.
        """
        raise NotImplementedError()

    def Import(self, patch, force):
        """Fill in defaults and checksums; fetch 'remote' patches locally."""
        if not patch.get("file"):
            if not patch.get("remote"):
                raise PatchError("Patch file must be specified in patch import.")
            else:
                patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

        for param in PatchSet.defaults:
            if not patch.get(param):
                patch[param] = PatchSet.defaults[param]

        if patch.get("remote"):
            patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)

        patch["filemd5"] = bb.utils.md5_file(patch["file"])

    def Push(self, force):
        raise NotImplementedError()

    def Pop(self, force):
        raise NotImplementedError()

    def Refresh(self, remote=None, all=None):
        raise NotImplementedError()
class PatchTree(PatchSet):
    """Patch queue driven by plain 'patch' plus a series file."""

    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.patchdir = os.path.join(self.dir, 'patches')
        self.seriespath = os.path.join(self.dir, 'patches', 'series')
        bb.utils.mkdirhier(self.patchdir)

    def _appendPatchFile(self, patch, strippath):
        # record "<basename>,<strip>"; %s formatting because strippath may
        # be the integer default (the old "," + strippath concatenation
        # raised TypeError for ints)
        with open(self.seriespath, 'a') as f:
            f.write("%s,%s\n" % (os.path.basename(patch), strippath))
        shellcmd = ["cat", patch, ">", self.patchdir + "/" + os.path.basename(patch)]
        runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

    def _removePatch(self, p):
        patch = {}
        patch['file'] = p.split(",")[0]
        patch['strippath'] = p.split(",")[1]
        self._applypatch(patch, False, True)

    def _removePatchFile(self, all=False):
        if not os.path.exists(self.seriespath):
            return
        with open(self.seriespath, 'r+') as f:
            patches = f.readlines()
        if all:
            for p in reversed(patches):
                self._removePatch(os.path.join(self.patchdir, p.strip()))
            patches = []
        else:
            self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
            patches.pop()
        with open(self.seriespath, 'w') as f:
            for p in patches:
                f.write(p)

    def Import(self, patch, force=None):
        """Insert 'patch' after the current position without applying it."""
        PatchSet.Import(self, patch, force)

        if self._current is not None:
            i = self._current + 1
        else:
            i = 0
        self.patches.insert(i, patch)

    def _applypatch(self, patch, force=False, reverse=False, run=True):
        shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
        if reverse:
            shellcmd.append('-R')

        if not run:
            # was: "sh" + "-c" + " ".join(shellcmd) -- string concatenation
            # produced one mashed, unusable token; return an argv list like
            # QuiltTree._runcmd does
            return ["sh", "-c", " ".join(shellcmd)]

        if not force:
            shellcmd.append('--dry-run')

        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if force:
            return

        # dry run succeeded: drop '--dry-run' and apply for real
        shellcmd.pop()
        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if not reverse:
            self._appendPatchFile(patch['file'], patch['strippath'])

        return output

    def Push(self, force=False, all=False, run=True):
        """Apply all patches, or the next unapplied one."""
        bb.note("self._current is %s" % self._current)
        bb.note("patches is %s" % self.patches)
        if all:
            for i in self.patches:
                bb.note("applying patch %s" % i)
                self._applypatch(i, force)
                self._current = i
        else:
            if self._current is not None:
                next = self._current + 1
            else:
                next = 0

            bb.note("applying patch %s" % self.patches[next])
            ret = self._applypatch(self.patches[next], force)

            self._current = next
            return ret

    def Pop(self, force=None, all=None):
        """Unapply all patches, or just the topmost one."""
        if all:
            self._removePatchFile(True)
            self._current = None
        else:
            self._removePatchFile(False)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Clean(self):
        """Unapply and forget every patch."""
        self.Pop(all=True)

class GitApplyTree(PatchTree):
    """PatchTree variant that prefers 'git am', falling back to 'git apply'."""

    def __init__(self, dir, d):
        PatchTree.__init__(self, dir, d)

    def _applypatch(self, patch, force=False, reverse=False, run=True):
        def _applypatchhelper(shellcmd, patch, force=False, reverse=False, run=True):
            if reverse:
                shellcmd.append('-R')

            shellcmd.append(patch['file'])

            if not run:
                # argv list, consistent with PatchTree._applypatch
                return ["sh", "-c", " ".join(shellcmd)]

            return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        try:
            shellcmd = ["git", "--work-tree=.", "am", "-3", "-p%s" % patch['strippath']]
            return _applypatchhelper(shellcmd, patch, force, reverse, run)
        except CmdError:
            shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]
            return _applypatchhelper(shellcmd, patch, force, reverse, run)
class QuiltTree(PatchSet):
    """Patch queue managed with quilt."""

    def _runcmd(self, args, run=True):
        quiltrc = self.d.getVar('QUILTRCFILE', True)
        if not run:
            return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
        runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)

    def _quiltpatchpath(self, file):
        return os.path.join(self.dir, "patches", os.path.basename(file))

    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.initialized = False
        p = os.path.join(self.dir, 'patches')
        if not os.path.exists(p):
            os.makedirs(p)

    def Clean(self):
        try:
            # best-effort: quilt may legitimately have nothing to pop
            self._runcmd(["pop", "-a", "-f"])
            oe.path.remove(os.path.join(self.dir, "patches", "series"))
        except Exception:
            pass
        self.initialized = True

    def InitFromDir(self):
        # read series -> self.patches
        seriespath = os.path.join(self.dir, 'patches', 'series')
        if not os.path.exists(self.dir):
            raise NotFoundError(self.dir)
        if os.path.exists(seriespath):
            with open(seriespath, 'r') as series:
                for line in series.readlines():
                    patch = {}
                    parts = line.strip().split()
                    patch["quiltfile"] = self._quiltpatchpath(parts[0])
                    patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
                    if len(parts) > 1:
                        # strip the "-p" prefix from the strip level
                        patch["strippath"] = parts[1][2:]
                    self.patches.append(patch)

        # determine which patches are applied -> self._current
        try:
            output = runcmd(["quilt", "applied"], self.dir)
        except CmdError as e:
            # was: "import sys; sys.exc_value" (Python 2 only idiom)
            if e.output.strip() == "No patches applied":
                return
            else:
                raise
        output = [val for val in output.split('\n') if not val.startswith('#')]
        for patch in self.patches:
            if os.path.basename(patch["quiltfile"]) == output[-1]:
                self._current = self.patches.index(patch)
        self.initialized = True

    def Import(self, patch, force=None):
        if not self.initialized:
            self.InitFromDir()
        PatchSet.Import(self, patch, force)
        oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
        with open(os.path.join(self.dir, "patches", "series"), "a") as f:
            f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"] + "\n")
        patch["quiltfile"] = self._quiltpatchpath(patch["file"])
        patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])

        # TODO: determine if the file being imported:
        #      1) is already imported, and is the same
        #      2) is already imported, but differs
        self.patches.insert(self._current or 0, patch)

    def Push(self, force=False, all=False, run=True):
        # quilt push [-f]
        args = ["push"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")
        if not run:
            return self._runcmd(args, run)

        self._runcmd(args)

        if self._current is not None:
            self._current = self._current + 1
        else:
            self._current = 0

    def Pop(self, force=None, all=None):
        # quilt pop [-f]
        args = ["pop"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")

        self._runcmd(args)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Refresh(self, **kwargs):
        """Refresh a patch: copy the quilt copy back to a file:// remote,
        or run 'quilt refresh' locally."""
        if kwargs.get("remote"):
            patch = self.patches[kwargs["patch"]]
            if not patch:
                raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
            (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
            if type == "file":
                import shutil
                if not patch.get("file") and patch.get("remote"):
                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

                shutil.copyfile(patch["quiltfile"], patch["file"])
            else:
                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
        else:
            # quilt refresh
            args = ["refresh"]
            if kwargs.get("quiltfile"):
                args.append(os.path.basename(kwargs["quiltfile"]))
            elif kwargs.get("patch"):
                args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
            self._runcmd(args)

class Resolver(object):
    """Interface for strategies that resolve patch application failures."""

    def __init__(self, patchset, terminal):
        raise NotImplementedError()

    def Resolve(self):
        raise NotImplementedError()

    def Revert(self):
        raise NotImplementedError()

    def Finalize(self):
        raise NotImplementedError()

class NOOPResolver(Resolver):
    """Resolver that simply pushes and propagates any failure."""

    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    def Resolve(self):
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push()
        except Exception:
            # was: a dead "import sys" here
            os.chdir(olddir)
            raise
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    def Resolve(self):
        """Force a push in the patchset; on failure drop to a shell so the
        user can resolve rejected hunks, then remote-refresh any patch the
        user changed."""
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push(False)
        except CmdError as v:
            # Patch application failed
            patchcmd = self.patchset.Push(True, False, False)

            t = self.patchset.d.getVar('T', True)
            if not t:
                bb.msg.fatal("Build", "T not set")
            bb.utils.mkdirhier(t)
            import random
            rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
            with open(rcfile, "w") as f:
                f.write("echo '*** Manual patch resolution mode ***'\n")
                f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
                f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
                f.write("echo ''\n")
                f.write(" ".join(patchcmd) + "\n")
            # was: 0775 -- Python 2 only octal literal
            os.chmod(rcfile, 0o775)

            self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)

            # Construct a new PatchSet after the user's changes, compare the
            # sets, checking patches for modifications, and doing a remote
            # refresh on each.
            oldpatchset = self.patchset
            self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)

            for patch in self.patchset.patches:
                oldpatch = None
                for opatch in oldpatchset.patches:
                    if opatch["quiltfile"] == patch["quiltfile"]:
                        oldpatch = opatch

                if oldpatch:
                    patch["remote"] = oldpatch["remote"]
                    if patch["quiltfile"] == oldpatch["quiltfile"]:
                        if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
                            bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
                            # user change? remote refresh
                            self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
                        else:
                            # User did not fix the problem. Abort.
                            raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
        except Exception:
            os.chdir(olddir)
            raise
        os.chdir(olddir)
import errno
import glob
import shutil
import subprocess
import os.path

def join(*paths):
    """Like os.path.join but doesn't treat absolute RHS specially"""
    return os.path.normpath("/".join(paths))

def relative(src, dest):
    """ Return a relative path from src to dest.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    '../../tmp/foo/bar'

    >>> relative("/usr/bin", "/usr/lib")
    '../lib'

    >>> relative("/tmp", "/tmp/foo/bar")
    'foo/bar'
    """

    return os.path.relpath(dest, src)

def make_relative_symlink(path):
    """ Convert an absolute symlink to a relative one """
    if not os.path.islink(path):
        return
    link = os.readlink(path)
    if not os.path.isabs(link):
        return

    # find the common ancestor directory
    ancestor = path
    depth = 0
    while ancestor and not link.startswith(ancestor):
        ancestor = ancestor.rpartition('/')[0]
        depth += 1

    if not ancestor:
        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
        return

    base = link.partition(ancestor)[2].strip('/')
    while depth > 1:
        base = "../" + base
        depth -= 1

    os.remove(path)
    os.symlink(base, path)

def format_display(path, metadata):
    """ Prepare a path for display to the user: relative to TOPDIR when
    that is shorter, otherwise the path unchanged. """
    rel = relative(metadata.getVar("TOPDIR", True), path)
    if len(rel) > len(path):
        return path
    else:
        return rel

def copytree(src, dst):
    # We could use something like shutil.copytree here but it turns out to
    # to be slow. It takes twice as long copying to an empty directory.
    # If dst already has contents performance can be 15 time slower
    # This way we also preserve hardlinks between files in the tree.

    bb.utils.mkdirhier(dst)
    cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
    check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
    """ Make the hard link when possible, otherwise copy. """
    bb.utils.mkdirhier(dst)
    # nothing to do for an empty source directory
    if os.path.isdir(src) and not len(os.listdir(src)):
        return

    if (os.stat(src).st_dev == os.stat(dst).st_dev):
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    else:
        # hardlinks cannot cross filesystems; fall back to a plain copy
        copytree(src, dst)

def remove(path, recurse=True):
    """Equivalent to rm -f or rm -rf"""
    for name in glob.glob(path):
        try:
            os.unlink(name)
        except OSError as exc:
            # was: exc.errno == errno.EISDIR -- unlink() on a directory is
            # EISDIR on Linux but EPERM on some systems; test the path itself
            if recurse and os.path.isdir(name):
                shutil.rmtree(name)
            elif exc.errno != errno.ENOENT:
                raise

def symlink(source, destination, force=False):
    """Create a symbolic link"""
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as e:
        # an identical pre-existing link is not an error
        if e.errno != errno.EEXIST or os.readlink(destination) != source:
            raise

class CalledProcessError(Exception):
    """Local stand-in for subprocess.CalledProcessError (pre-Python 2.7)."""
    def __init__(self, retcode, cmd, output=None):
        self.retcode = retcode
        self.cmd = cmd
        self.output = output
    def __str__(self):
        return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.retcode, self.output)

# Not needed when we move to python 2.7
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd, output=output)
    return output

def find(dir, **walkoptions):
    """ Given a directory, recurses into that directory,
    returning all files as absolute paths. """

    for root, dirs, files in os.walk(dir, **walkoptions):
        for file in files:
            yield os.path.join(root, file)


## realpath() related functions
def __is_path_below(file, root):
    # 'root' must end with os.sep for this prefix test to be exact
    return (file + os.path.sep).startswith(root)

def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks. """
    have_dir = True

    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            raise OSError(errno.ENOENT, "no such directory %s" % start)

        if d == os.path.pardir:  # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            (start, have_dir) = __realpath(os.path.join(start, d),
                                           root, loop_cnt, assume_dir)

    assert(__is_path_below(start, root))

    return start
+ pass + else: + (start, have_dir) = __realpath(os.path.join(start, d), + root, loop_cnt, assume_dir) + + assert(__is_path_below(start, root)) + + return start + +def __realpath(file, root, loop_cnt, assume_dir): + while os.path.islink(file) and len(file) >= len(root): + if loop_cnt == 0: + raise OSError(errno.ELOOP, file) + + loop_cnt -= 1 + target = os.path.normpath(os.readlink(file)) + + if not os.path.isabs(target): + tdir = os.path.dirname(file) + assert(__is_path_below(tdir, root)) + else: + tdir = root + + file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir) + + try: + is_dir = os.path.isdir(file) + except: + is_dir = false + + return (file, is_dir) + +def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False): + """ Returns the canonical path of 'file' with assuming a + toplevel 'root' directory. When 'use_physdir' is set, all + preceding path components of 'file' will be resolved first; + this flag should be set unless it is guaranteed that there is + no symlink in the path. 
When 'assume_dir' is not set, missing + path components will raise an ENOENT error""" + + root = os.path.normpath(root) + file = os.path.normpath(file) + + if not root.endswith(os.path.sep): + # letting root end with '/' makes some things easier + root = root + os.path.sep + + if not __is_path_below(file, root): + raise OSError(errno.EINVAL, "file '%s' is not below root" % file) + + try: + if use_physdir: + file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir) + else: + file = __realpath(file, root, loop_cnt, assume_dir)[0] + except OSError as e: + if e.errno == errno.ELOOP: + # make ELOOP more readable; without catching it, there will + # be printed a backtrace with 100s of OSError exceptions + # else + raise OSError(errno.ELOOP, + "too much recursions while resolving '%s'; loop in '%s'" % + (file, e.strerror)) + + raise + + return file diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py new file mode 100644 index 0000000000..b0cbcb1fbc --- /dev/null +++ b/meta/lib/oe/prservice.py @@ -0,0 +1,126 @@ + +def prserv_make_conn(d, check = False): + import prserv.serv + host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':')) + try: + conn = None + conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1])) + if check: + if not conn.ping(): + raise Exception('service not available') + d.setVar("__PRSERV_CONN",conn) + except Exception, exc: + bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc))) + + return conn + +def prserv_dump_db(d): + if not d.getVar('PRSERV_HOST', True): + bb.error("Not using network based PR service") + return None + + conn = d.getVar("__PRSERV_CONN", True) + if conn is None: + conn = prserv_make_conn(d) + if conn is None: + bb.error("Making connection failed to remote PR service") + return None + + #dump db + opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True) + opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True) + 
opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True) + opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True)) + return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col) + +def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None): + if not d.getVar('PRSERV_HOST', True): + bb.error("Not using network based PR service") + return None + + conn = d.getVar("__PRSERV_CONN", True) + if conn is None: + conn = prserv_make_conn(d) + if conn is None: + bb.error("Making connection failed to remote PR service") + return None + #get the entry values + imported = [] + prefix = "PRAUTO$" + for v in d.keys(): + if v.startswith(prefix): + (remain, sep, checksum) = v.rpartition('$') + (remain, sep, pkgarch) = remain.rpartition('$') + (remain, sep, version) = remain.rpartition('$') + if (remain + '$' != prefix) or \ + (filter_version and filter_version != version) or \ + (filter_pkgarch and filter_pkgarch != pkgarch) or \ + (filter_checksum and filter_checksum != checksum): + continue + try: + value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True)) + except BaseException as exc: + bb.debug("Not valid value of %s:%s" % (v,str(exc))) + continue + ret = conn.importone(version,pkgarch,checksum,value) + if ret != value: + bb.error("importing(%s,%s,%s,%d) failed. 
DB may have larger value %d" % (version,pkgarch,checksum,value,ret)) + else: + imported.append((version,pkgarch,checksum,value)) + return imported + +def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False): + import bb.utils + #initilize the output file + bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True)) + df = d.getVar('PRSERV_DUMPFILE', True) + #write data + lf = bb.utils.lockfile("%s.lock" % df) + f = open(df, "a") + if metainfo: + #dump column info + f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']); + f.write("#Table: %s\n" % metainfo['tbl_name']) + f.write("#Columns:\n") + f.write("#name \t type \t notn \t dflt \t pk\n") + f.write("#----------\t --------\t --------\t --------\t ----\n") + for i in range(len(metainfo['col_info'])): + f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" % + (metainfo['col_info'][i]['name'], + metainfo['col_info'][i]['type'], + metainfo['col_info'][i]['notnull'], + metainfo['col_info'][i]['dflt_value'], + metainfo['col_info'][i]['pk'])) + f.write("\n") + + if lockdown: + f.write("PRSERV_LOCKDOWN = \"1\"\n\n") + + if datainfo: + idx = {} + for i in range(len(datainfo)): + pkgarch = datainfo[i]['pkgarch'] + value = datainfo[i]['value'] + if pkgarch not in idx: + idx[pkgarch] = i + elif value > datainfo[idx[pkgarch]]['value']: + idx[pkgarch] = i + f.write("PRAUTO$%s$%s$%s = \"%s\"\n" % + (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value))) + if not nomax: + for i in idx: + f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value']))) + f.close() + bb.utils.unlockfile(lf) + +def prserv_check_avail(d): + host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':')) + try: + if len(host_params) != 2: + raise TypeError + else: + int(host_params[1]) + except TypeError: + bb.fatal('Undefined/incorrect PRSERV_HOST value. 
Format: "host:port"') + else: + prserv_make_conn(d, True) diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py new file mode 100644 index 0000000000..d5cdaa0fcd --- /dev/null +++ b/meta/lib/oe/qa.py @@ -0,0 +1,111 @@ +class ELFFile: + EI_NIDENT = 16 + + EI_CLASS = 4 + EI_DATA = 5 + EI_VERSION = 6 + EI_OSABI = 7 + EI_ABIVERSION = 8 + + # possible values for EI_CLASS + ELFCLASSNONE = 0 + ELFCLASS32 = 1 + ELFCLASS64 = 2 + + # possible value for EI_VERSION + EV_CURRENT = 1 + + # possible values for EI_DATA + ELFDATANONE = 0 + ELFDATA2LSB = 1 + ELFDATA2MSB = 2 + + def my_assert(self, expectation, result): + if not expectation == result: + #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name) + raise Exception("This does not work as expected") + + def __init__(self, name, bits = 0): + self.name = name + self.bits = bits + self.objdump_output = {} + + def open(self): + self.file = file(self.name, "r") + self.data = self.file.read(ELFFile.EI_NIDENT+4) + + self.my_assert(len(self.data), ELFFile.EI_NIDENT+4) + self.my_assert(self.data[0], chr(0x7f) ) + self.my_assert(self.data[1], 'E') + self.my_assert(self.data[2], 'L') + self.my_assert(self.data[3], 'F') + if self.bits == 0: + if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32): + self.bits = 32 + elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64): + self.bits = 64 + else: + # Not 32-bit or 64.. 
lets assert + raise Exception("ELF but not 32 or 64 bit.") + elif self.bits == 32: + self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32)) + elif self.bits == 64: + self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64)) + else: + raise Exception("Must specify unknown, 32 or 64 bit size.") + self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) ) + + self.sex = self.data[ELFFile.EI_DATA] + if self.sex == chr(ELFFile.ELFDATANONE): + raise Exception("self.sex == ELFDATANONE") + elif self.sex == chr(ELFFile.ELFDATA2LSB): + self.sex = "<" + elif self.sex == chr(ELFFile.ELFDATA2MSB): + self.sex = ">" + else: + raise Exception("Unknown self.sex") + + def osAbi(self): + return ord(self.data[ELFFile.EI_OSABI]) + + def abiVersion(self): + return ord(self.data[ELFFile.EI_ABIVERSION]) + + def abiSize(self): + return self.bits + + def isLittleEndian(self): + return self.sex == "<" + + def isBigEngian(self): + return self.sex == ">" + + def machine(self): + """ + We know the sex stored in self.sex and we + know the position + """ + import struct + (a,) = struct.unpack(self.sex+"H", self.data[18:20]) + return a + + def run_objdump(self, cmd, d): + import bb.process + import sys + + if cmd in self.objdump_output: + return self.objdump_output[cmd] + + objdump = d.getVar('OBJDUMP', True) + + env = os.environ.copy() + env["LC_ALL"] = "C" + env["PATH"] = d.getVar('PATH', True) + + try: + bb.note("%s %s %s" % (objdump, cmd, self.name)) + self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0] + return self.objdump_output[cmd] + except Exception as e: + bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e)) + return "" diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py new file mode 100644 index 0000000000..67ed9ef03d --- /dev/null +++ b/meta/lib/oe/rootfs.py @@ -0,0 +1,800 @@ +from abc import ABCMeta, abstractmethod +from oe.utils import execute_pre_post_process +from oe.package_manager 
import * +from oe.manifest import * +import oe.path +import filecmp +import shutil +import os +import subprocess +import re + + +class Rootfs(object): + """ + This is an abstract class. Do not instantiate this directly. + """ + __metaclass__ = ABCMeta + + def __init__(self, d): + self.d = d + self.pm = None + self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True) + self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True) + + self.install_order = Manifest.INSTALL_ORDER + + @abstractmethod + def _create(self): + pass + + @abstractmethod + def _get_delayed_postinsts(self): + pass + + @abstractmethod + def _save_postinsts(self): + pass + + @abstractmethod + def _log_check(self): + pass + + def _insert_feed_uris(self): + if bb.utils.contains("IMAGE_FEATURES", "package-management", + True, False, self.d): + self.pm.insert_feeds_uris() + + @abstractmethod + def _handle_intercept_failure(self, failed_script): + pass + + """ + The _cleanup() method should be used to clean-up stuff that we don't really + want to end up on target. For example, in the case of RPM, the DB locks. + The method is called, once, at the end of create() method. 
+ """ + @abstractmethod + def _cleanup(self): + pass + + def _exec_shell_cmd(self, cmd): + fakerootcmd = self.d.getVar('FAKEROOT', True) + if fakerootcmd is not None: + exec_cmd = [fakerootcmd, cmd] + else: + exec_cmd = cmd + + try: + subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output)) + + return None + + def create(self): + bb.note("###### Generate rootfs #######") + pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True) + post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True) + + intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + + bb.utils.remove(intercepts_dir, True) + + bb.utils.mkdirhier(self.image_rootfs) + + bb.utils.mkdirhier(self.deploy_dir_image) + + shutil.copytree(self.d.expand("${COREBASE}/scripts/postinst-intercepts"), + intercepts_dir) + + shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"), + self.deploy_dir_image + + "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt") + + execute_pre_post_process(self.d, pre_process_cmds) + + # call the package manager dependent create method + self._create() + + sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True) + bb.utils.mkdirhier(sysconfdir) + with open(sysconfdir + "/version", "w+") as ver: + ver.write(self.d.getVar('BUILDNAME', True) + "\n") + + self._run_intercepts() + + execute_pre_post_process(self.d, post_process_cmds) + + if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", + True, False, self.d): + delayed_postinsts = self._get_delayed_postinsts() + if delayed_postinsts is not None: + bb.fatal("The following packages could not be configured " + "offline and rootfs is read-only: %s" % + delayed_postinsts) + + if self.d.getVar('USE_DEVFS', True) != "1": + self._create_devfs() + + self._uninstall_uneeded() + + self._insert_feed_uris() + + self._run_ldconfig() + + 
self._generate_kernel_module_deps() + + self._cleanup() + + def _uninstall_uneeded(self): + # Remove unneeded init script symlinks + delayed_postinsts = self._get_delayed_postinsts() + if delayed_postinsts is None: + if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")): + self._exec_shell_cmd(["update-rc.d", "-f", "-r", + self.d.getVar('IMAGE_ROOTFS', True), + "run-postinsts", "remove"]) + + # Remove unneeded package-management related components + if bb.utils.contains("IMAGE_FEATURES", "package-management", + True, False, self.d): + return + + if delayed_postinsts is None: + installed_pkgs_dir = self.d.expand('${WORKDIR}/installed_pkgs.txt') + pkgs_to_remove = list() + with open(installed_pkgs_dir, "r+") as installed_pkgs: + pkgs_installed = installed_pkgs.read().split('\n') + for pkg_installed in pkgs_installed[:]: + pkg = pkg_installed.split()[0] + if pkg in ["update-rc.d", + "base-passwd", + self.d.getVar("ROOTFS_BOOTSTRAP_INSTALL", True) + ]: + pkgs_to_remove.append(pkg) + pkgs_installed.remove(pkg_installed) + + if len(pkgs_to_remove) > 0: + self.pm.remove(pkgs_to_remove, False) + # Update installed_pkgs.txt + open(installed_pkgs_dir, "w+").write('\n'.join(pkgs_installed)) + + else: + self._save_postinsts() + + self.pm.remove_packaging_data() + + def _run_intercepts(self): + intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True), + "intercept_scripts") + + bb.note("Running intercept scripts:") + os.environ['D'] = self.image_rootfs + for script in os.listdir(intercepts_dir): + script_full = os.path.join(intercepts_dir, script) + + if script == "postinst_intercept" or not os.access(script_full, os.X_OK): + continue + + bb.note("> Executing %s intercept ..." % script) + + try: + subprocess.check_output(script_full) + except subprocess.CalledProcessError as e: + bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" 
% + (script, e.returncode)) + + with open(script_full) as intercept: + registered_pkgs = None + for line in intercept.read().split("\n"): + m = re.match("^##PKGS:(.*)", line) + if m is not None: + registered_pkgs = m.group(1).strip() + break + + if registered_pkgs is not None: + bb.warn("The postinstalls for the following packages " + "will be postponed for first boot: %s" % + registered_pkgs) + + # call the backend dependent handler + self._handle_intercept_failure(registered_pkgs) + + def _run_ldconfig(self): + if self.d.getVar('LDCONFIGDEPEND', True): + bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v") + self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c', + 'new', '-v']) + + def _generate_kernel_module_deps(self): + kernel_abi_ver_file = os.path.join(self.d.getVar('STAGING_KERNEL_DIR', True), + 'kernel-abiversion') + if os.path.exists(kernel_abi_ver_file): + kernel_ver = open(kernel_abi_ver_file).read().strip(' \n') + modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules', kernel_ver) + + bb.utils.mkdirhier(modules_dir) + + self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, + kernel_ver]) + + """ + Create devfs: + * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file + * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, seached + for in the BBPATH + If neither are specified then the default name of files/device_table-minimal.txt + is searched for in the BBPATH (same as the old version.) 
+ """ + def _create_devfs(self): + devtable_list = [] + devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True) + if devtable is not None: + devtable_list.append(devtable) + else: + devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True) + if devtables is None: + devtables = 'files/device_table-minimal.txt' + for devtable in devtables.split(): + devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable)) + + for devtable in devtable_list: + self._exec_shell_cmd(["makedevs", "-r", + self.image_rootfs, "-D", devtable]) + + +class RpmRootfs(Rootfs): + def __init__(self, d, manifest_dir): + super(RpmRootfs, self).__init__(d) + + self.manifest = RpmManifest(d, manifest_dir) + + self.pm = RpmPM(d, + d.getVar('IMAGE_ROOTFS', True), + self.d.getVar('TARGET_VENDOR', True) + ) + + self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True) + if self.inc_rpm_image_gen != "1": + bb.utils.remove(self.image_rootfs, True) + else: + self.pm.recovery_packaging_data() + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True) + + self.pm.create_configs() + + ''' + While rpm incremental image generation is enabled, it will remove the + unneeded pkgs by comparing the new install solution manifest and the + old installed manifest. 
+ ''' + def _create_incremental(self, pkgs_initial_install): + if self.inc_rpm_image_gen == "1": + + pkgs_to_install = list() + for pkg_type in pkgs_initial_install: + pkgs_to_install += pkgs_initial_install[pkg_type] + + installed_manifest = self.pm.load_old_install_solution() + solution_manifest = self.pm.dump_install_solution(pkgs_to_install) + + pkg_to_remove = list() + for pkg in installed_manifest: + if pkg not in solution_manifest: + pkg_to_remove.append(pkg) + + self.pm.update() + + bb.note('incremental update -- upgrade packages in place ') + self.pm.upgrade() + if pkg_to_remove != []: + bb.note('incremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + + # update PM index files + self.pm.write_index() + + self.pm.dump_all_available_pkgs() + + if self.inc_rpm_image_gen == "1": + self._create_incremental(pkgs_to_install) + + self.pm.update() + + pkgs = [] + pkgs_attempt = [] + for pkg_type in pkgs_to_install: + if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: + pkgs_attempt += pkgs_to_install[pkg_type] + else: + pkgs += pkgs_to_install[pkg_type] + + self.pm.install(pkgs) + + self.pm.install(pkgs_attempt, True) + + self.pm.install_complementary() + + self._log_check() + + if self.inc_rpm_image_gen == "1": + self.pm.backup_packaging_data() + + self.pm.rpm_setup_smart_target_config() + + @staticmethod + def _depends_list(): + return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS', + 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH'] + + def _get_delayed_postinsts(self): + postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts") + if os.path.isdir(postinst_dir): + files = os.listdir(postinst_dir) + for f in files: + bb.note('Delayed package scriptlet: %s' % f) + return files + + return None + + def _save_postinsts(self): + # this is just a stub. 
For RPM, the failed postinstalls are + # already saved in /etc/rpm-postinsts + pass + + def _log_check_warn(self): + r = re.compile('(warn|Warn)') + log_path = self.d.expand("${T}/log.do_rootfs") + with open(log_path, 'r') as log: + for line in log: + if 'log_check' in line: + continue + + m = r.search(line) + if m: + bb.warn('[log_check] %s: found a warning message in the logfile (keyword \'%s\'):\n[log_check] %s' + % (self.d.getVar('PN', True), m.group(), line)) + + def _log_check_error(self): + r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)') + log_path = self.d.expand("${T}/log.do_rootfs") + with open(log_path, 'r') as log: + found_error = 0 + message = "\n" + for line in log: + if 'log_check' in line: + continue + + m = r.search(line) + if m: + found_error = 1 + bb.warn('[log_check] %s: found an error message in the logfile (keyword \'%s\'):\n[log_check] %s' + % (self.d.getVar('PN', True), m.group(), line)) + + if found_error >= 1 and found_error <= 5: + message += line + '\n' + found_error += 1 + + if found_error == 6: + bb.fatal(message) + + def _log_check(self): + self._log_check_warn() + self._log_check_error() + + def _handle_intercept_failure(self, registered_pkgs): + rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + bb.utils.mkdirhier(rpm_postinsts_dir) + + # Save the package postinstalls in /etc/rpm-postinsts + for pkg in registered_pkgs.split(): + self.pm.save_rpmpostinst(pkg) + + def _cleanup(self): + # during the execution of postprocess commands, rpm is called several + # times to get the files installed, dependencies, etc. 
This creates the
        # __db.00* (Berkeley DB files that hold locks, rpm specific environment
        # settings, etc.), that should not get into the final rootfs
        self.pm.unlock_rpm_db()
        bb.utils.remove(self.image_rootfs + "/install", True)


class DpkgRootfs(Rootfs):
    """Rootfs implementation for deb-based images, driven by DpkgPM."""

    def __init__(self, d, manifest_dir):
        super(DpkgRootfs, self).__init__(d)

        # a deb rootfs is always rebuilt from scratch (no incremental mode)
        bb.utils.remove(self.image_rootfs, True)
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
        self.manifest = DpkgManifest(d, manifest_dir)
        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
                         d.getVar('PACKAGE_ARCHS', True),
                         d.getVar('DPKG_ARCH', True))


    def _create(self):
        # Install everything from the initial manifest, then run the
        # configuration steps dpkg would otherwise defer.
        pkgs_to_install = self.manifest.parse_initial_manifest()

        alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
        bb.utils.mkdirhier(alt_dir)

        # update PM index files
        self.pm.write_index()

        self.pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # attempt-only packages may fail to install without aborting
                self.pm.install(pkgs_to_install[pkg_type],
                                [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

        self.pm.install_complementary()

        self.pm.fix_broken_dependencies()

        self.pm.mark_packages("installed")

        self.pm.run_pre_post_installs()

    @staticmethod
    def _depends_list():
        # Variables whose values influence the generated rootfs (vardeps).
        return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMAND']

    def _get_delayed_postinsts(self):
        # Scan the dpkg status file: packages still in "unpacked" state
        # have a postinst that could not run at rootfs-creation time.
        pkg_list = []
        with open(self.image_rootfs + "/var/lib/dpkg/status") as status:
            for line in status:
                m_pkg = re.match("^Package: (.*)", line)
                m_status = re.match("^Status:.*unpacked", line)
                if m_pkg is not None:
                    pkg_name = m_pkg.group(1)
                elif m_status is not None:
                    pkg_list.append(pkg_name)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts(self):
        # Copy delayed postinst scripts aside so they can run on first
        # boot; the numeric prefix preserves installation order.
        num = 0
        for p in self._get_delayed_postinsts():
            dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
            src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")

            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1

    def _handle_intercept_failure(self, registered_pkgs):
        # Reset affected packages to "unpacked" so their postinsts are
        # re-run on the target at first boot.
        self.pm.mark_packages("unpacked", registered_pkgs.split())

    def _log_check(self):
        pass

    def _cleanup(self):
        pass


class OpkgRootfs(Rootfs):
    """Rootfs implementation for ipk-based images, driven by OpkgPM."""

    def __init__(self, d, manifest_dir):
        super(OpkgRootfs, self).__init__(d)

        self.manifest = OpkgManifest(d, manifest_dir)
        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)

        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
        if self._remove_old_rootfs():
            # start clean: the previous rootfs is stale or incremental
            # generation is disabled
            bb.utils.remove(self.image_rootfs, True)
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
        else:
            # incremental mode: reuse the previous rootfs and restore its
            # packaging metadata
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
            self.pm.recover_packaging_data()

        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)

    def _prelink_file(self, root_dir, filename):
        # Run the native prelink on 'filename' inside 'root_dir', copying
        # in a prelink.conf first if the rootfs does not have one.
        bb.note('prelink %s in %s' % (filename, root_dir))
        prelink_cfg = oe.path.join(root_dir,
                                   self.d.expand('${sysconfdir}/prelink.conf'))
        if not os.path.exists(prelink_cfg):
            shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
                        prelink_cfg)

        cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
        self._exec_shell_cmd([cmd_prelink,
                              '--root',
                              root_dir,
                              '-amR',
                              '-N',
                              '-c',
                              self.d.expand('${sysconfdir}/prelink.conf')])

    '''
    Compare two files with the same key twice to see if they are equal.
    If they are not equal, it means they are duplicated and come from
    different packages.
+ 1st: Comapre them directly; + 2nd: While incremental image creation is enabled, one of the + files could be probaly prelinked in the previous image + creation and the file has been changed, so we need to + prelink the other one and compare them. + ''' + def _file_equal(self, key, f1, f2): + + # Both of them are not prelinked + if filecmp.cmp(f1, f2): + return True + + if self.image_rootfs not in f1: + self._prelink_file(f1.replace(key, ''), f1) + + if self.image_rootfs not in f2: + self._prelink_file(f2.replace(key, ''), f2) + + # Both of them are prelinked + if filecmp.cmp(f1, f2): + return True + + # Not equal + return False + + """ + This function was reused from the old implementation. + See commit: "image.bbclass: Added variables for multilib support." by + Lianhao Lu. + """ + def _multilib_sanity_test(self, dirs): + + allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True) + if allow_replace is None: + allow_replace = "" + + allow_rep = re.compile(re.sub("\|$", "", allow_replace)) + error_prompt = "Multilib check error:" + + files = {} + for dir in dirs: + for root, subfolders, subfiles in os.walk(dir): + for file in subfiles: + item = os.path.join(root, file) + key = str(os.path.join("/", os.path.relpath(item, dir))) + + valid = True + if key in files: + #check whether the file is allow to replace + if allow_rep.match(key): + valid = True + else: + if os.path.exists(files[key]) and \ + os.path.exists(item) and \ + not self._file_equal(key, files[key], item): + valid = False + bb.fatal("%s duplicate files %s %s is not the same\n" % + (error_prompt, item, files[key])) + + #pass the check, add to list + if valid: + files[key] = item + + def _multilib_test_install(self, pkgs): + ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True) + bb.utils.mkdirhier(ml_temp) + + dirs = [self.image_rootfs] + + for variant in self.d.getVar("MULTILIB_VARIANTS", True).split(): + ml_target_rootfs = os.path.join(ml_temp, variant) + + bb.utils.remove(ml_target_rootfs, True) + 
+ ml_opkg_conf = os.path.join(ml_temp, + variant + "-" + os.path.basename(self.opkg_conf)) + + ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs) + + ml_pm.update() + ml_pm.install(pkgs) + + dirs.append(ml_target_rootfs) + + self._multilib_sanity_test(dirs) + + ''' + While ipk incremental image generation is enabled, it will remove the + unneeded pkgs by comparing the old full manifest in previous existing + image and the new full manifest in the current image. + ''' + def _remove_extra_packages(self, pkgs_initial_install): + if self.inc_opkg_image_gen == "1": + # Parse full manifest in previous existing image creation session + old_full_manifest = self.manifest.parse_full_manifest() + + # Create full manifest for the current image session, the old one + # will be replaced by the new one. + self.manifest.create_full(self.pm) + + # Parse full manifest in current image creation session + new_full_manifest = self.manifest.parse_full_manifest() + + pkg_to_remove = list() + for pkg in old_full_manifest: + if pkg not in new_full_manifest: + pkg_to_remove.append(pkg) + + if pkg_to_remove != []: + bb.note('decremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + ''' + Compare with previous existing image creation, if some conditions + triggered, the previous old image should be removed. + The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS + and BAD_RECOMMENDATIONS' has been changed. 
+ ''' + def _remove_old_rootfs(self): + if self.inc_opkg_image_gen != "1": + return True + + vars_list_file = self.d.expand('${T}/vars_list') + + old_vars_list = "" + if os.path.exists(vars_list_file): + old_vars_list = open(vars_list_file, 'r+').read() + + new_vars_list = '%s:%s:%s\n' % \ + ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(), + (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(), + (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip()) + open(vars_list_file, 'w+').write(new_vars_list) + + if old_vars_list != new_vars_list: + return True + + return False + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True) + opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True) + rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True) + + # update PM index files, unless users provide their own feeds + if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1": + self.pm.write_index() + + execute_pre_post_process(self.d, opkg_pre_process_cmds) + + self.pm.update() + + self.pm.handle_bad_recommendations() + + if self.inc_opkg_image_gen == "1": + self._remove_extra_packages(pkgs_to_install) + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + # For multilib, we perform a sanity test before final install + # If sanity test fails, it will automatically do a bb.fatal() + # and the installation will stop + if pkg_type == Manifest.PKG_TYPE_MULTILIB: + self._multilib_test_install(pkgs_to_install[pkg_type]) + + self.pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + self.pm.install_complementary() + + execute_pre_post_process(self.d, opkg_post_process_cmds) + execute_pre_post_process(self.d, rootfs_post_install_cmds) + + if self.inc_opkg_image_gen == "1": + self.pm.backup_packaging_data() + + @staticmethod + def _depends_list(): + return 
['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR'] + + def _get_delayed_postinsts(self): + pkg_list = [] + status_file = os.path.join(self.image_rootfs, + self.d.getVar('OPKGLIBDIR', True).strip('/'), + "opkg", "status") + + with open(status_file) as status: + for line in status: + m_pkg = re.match("^Package: (.*)", line) + m_status = re.match("^Status:.*unpacked", line) + if m_pkg is not None: + pkg_name = m_pkg.group(1) + elif m_status is not None: + pkg_list.append(pkg_name) + + if len(pkg_list) == 0: + return None + + return pkg_list + + def _save_postinsts(self): + num = 0 + for p in self._get_delayed_postinsts(): + dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts") + src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info") + + bb.utils.mkdirhier(dst_postinst_dir) + + if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): + shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), + os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) + + num += 1 + + def _handle_intercept_failure(self, registered_pkgs): + self.pm.mark_packages("unpacked", registered_pkgs.split()) + + def _log_check(self): + pass + + def _cleanup(self): + pass + +def get_class_for_type(imgtype): + return {"rpm": RpmRootfs, + "ipk": OpkgRootfs, + "deb": DpkgRootfs}[imgtype] + +def variable_depends(d, manifest_dir=None): + img_type = d.getVar('IMAGE_PKGTYPE', True) + cls = get_class_for_type(img_type) + return cls._depends_list() + +def create_rootfs(d, manifest_dir=None): + env_bkp = os.environ.copy() + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + RpmRootfs(d, manifest_dir).create() + elif img_type == "ipk": + OpkgRootfs(d, manifest_dir).create() + elif img_type == "deb": + DpkgRootfs(d, manifest_dir).create() + + os.environ.clear() + os.environ.update(env_bkp) + + +def 
image_list_installed_packages(d, format=None, rootfs_dir=None): + if not rootfs_dir: + rootfs_dir = d.getVar('IMAGE_ROOTFS', True) + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + return RpmPkgsList(d, rootfs_dir).list(format) + elif img_type == "ipk": + return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list(format) + elif img_type == "deb": + return DpkgPkgsList(d, rootfs_dir).list(format) + +if __name__ == "__main__": + """ + We should be able to run this as a standalone script, from outside bitbake + environment. + """ + """ + TBD + """ diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py new file mode 100644 index 0000000000..c57a441941 --- /dev/null +++ b/meta/lib/oe/sdk.py @@ -0,0 +1,326 @@ +from abc import ABCMeta, abstractmethod +from oe.utils import execute_pre_post_process +from oe.manifest import * +from oe.package_manager import * +import os +import shutil +import glob + + +class Sdk(object): + __metaclass__ = ABCMeta + + def __init__(self, d, manifest_dir): + self.d = d + self.sdk_output = self.d.getVar('SDK_OUTPUT', True) + self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/') + self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/') + self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/') + + self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path) + self.sdk_host_sysroot = self.sdk_output + + if manifest_dir is None: + self.manifest_dir = self.d.getVar("SDK_DIR", True) + else: + self.manifest_dir = manifest_dir + + bb.utils.remove(self.sdk_output, True) + + self.install_order = Manifest.INSTALL_ORDER + + @abstractmethod + def _populate(self): + pass + + def populate(self): + bb.utils.mkdirhier(self.sdk_output) + + # call backend dependent implementation + self._populate() + + # Don't ship any libGL in the SDK + bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path, + self.d.getVar('libdir_nativesdk', True).strip('/'), + "libGL*")) + + # 
Fix or remove broken .la files
        bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                                     self.d.getVar('libdir_nativesdk', True).strip('/'),
                                     "*.la"))

        # Link the ld.so.cache file into the hosts filesystem
        link_name = os.path.join(self.sdk_output, self.sdk_native_path,
                                 self.sysconfdir, "ld.so.cache")
        bb.utils.mkdirhier(os.path.dirname(link_name))
        os.symlink("/etc/ld.so.cache", link_name)

        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))


class RpmSdk(Sdk):
    """SDK population for rpm-based builds (RpmPM for host and target)."""

    def __init__(self, d, manifest_dir=None):
        super(RpmSdk, self).__init__(d, manifest_dir)

        self.target_manifest = RpmManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = RpmManifest(d, self.manifest_dir,
                                         Manifest.MANIFEST_TYPE_SDK_HOST)

        # extra "provides" fed to rpm so dependency resolution succeeds
        # inside the sysroot
        target_providename = ['/bin/sh',
                              '/bin/bash',
                              '/usr/bin/env',
                              '/usr/bin/perl',
                              'pkgconfig'
                              ]

        self.target_pm = RpmPM(d,
                               self.sdk_target_sysroot,
                               self.d.getVar('TARGET_VENDOR', True),
                               'target',
                               target_providename
                               )

        sdk_providename = ['/bin/sh',
                           '/bin/bash',
                           '/usr/bin/env',
                           '/usr/bin/perl',
                           'pkgconfig',
                           'libGL.so()(64bit)',
                           'libGL.so'
                           ]

        self.host_pm = RpmPM(d,
                             self.sdk_host_sysroot,
                             self.d.getVar('SDK_VENDOR', True),
                             'host',
                             sdk_providename,
                             "SDK_PACKAGE_ARCHS",
                             "SDK_OS"
                             )

    def _populate_sysroot(self, pm, manifest):
        # Install every package listed in 'manifest' into the sysroot
        # managed by 'pm', honouring the standard install order.
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.create_configs()
        pm.write_index()
        pm.dump_all_available_pkgs()
        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # attempt-only packages may fail without aborting
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))

        self.target_pm.remove_packaging_data()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))

        self.host_pm.remove_packaging_data()

        # Move host RPM library data
        native_rpm_state_dir = os.path.join(self.sdk_output,
                                            self.sdk_native_path,
                                            self.d.getVar('localstatedir_nativesdk', True).strip('/'),
                                            "lib",
                                            "rpm"
                                            )
        bb.utils.mkdirhier(native_rpm_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output,
                                        "var",
                                        "lib",
                                        "rpm",
                                        "*")):
            bb.utils.movefile(f, native_rpm_state_dir)

        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)

        # Move host sysconfig data
        native_sysconf_dir = os.path.join(self.sdk_output,
                                          self.sdk_native_path,
                                          self.d.getVar('sysconfdir',
                                                        True).strip('/'),
                                          )
        bb.utils.mkdirhier(native_sysconf_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
            bb.utils.movefile(f, native_sysconf_dir)
        bb.utils.remove(os.path.join(self.sdk_output, "etc"), True)


class OpkgSdk(Sdk):
    """SDK population for ipk-based builds (OpkgPM for host and target)."""

    def __init__(self, d, manifest_dir=None):
        super(OpkgSdk, self).__init__(d, manifest_dir)

        self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
        self.host_conf = self.d.getVar("IPKGCONF_SDK", True)

        self.target_manifest = OpkgManifest(d, self.manifest_dir,
                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = OpkgManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)

        self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))

        self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
                              self.d.getVar("SDK_PACKAGE_ARCHS", True))

    def _populate_sysroot(self, pm, manifest):
        pkgs_to_install = manifest.parse_initial_manifest()

        # skip index regeneration when images are built from user feeds
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            pm.write_index()
+ + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True)) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True)) + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True)) + + target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir) + host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir) + + bb.utils.mkdirhier(target_sysconfdir) + shutil.copy(self.target_conf, target_sysconfdir) + os.chmod(os.path.join(target_sysconfdir, + os.path.basename(self.target_conf)), 0644) + + bb.utils.mkdirhier(host_sysconfdir) + shutil.copy(self.host_conf, host_sysconfdir) + os.chmod(os.path.join(host_sysconfdir, + os.path.basename(self.host_conf)), 0644) + + native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, + self.d.getVar('localstatedir_nativesdk', True).strip('/'), + "lib", "opkg") + bb.utils.mkdirhier(native_opkg_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")): + bb.utils.movefile(f, native_opkg_state_dir) + + bb.utils.remove(os.path.join(self.sdk_output, "var"), True) + + +class DpkgSdk(Sdk): + def __init__(self, d, manifest_dir=None): + super(DpkgSdk, self).__init__(d, manifest_dir) + + self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt") + self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk") + + self.target_manifest = DpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + 
self.host_manifest = DpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + self.target_pm = DpkgPM(d, self.sdk_target_sysroot, + self.d.getVar("PACKAGE_ARCHS", True), + self.d.getVar("DPKG_ARCH", True), + self.target_conf_dir) + + self.host_pm = DpkgPM(d, self.sdk_host_sysroot, + self.d.getVar("SDK_PACKAGE_ARCHS", True), + self.d.getVar("DEB_SDK_ARCH", True), + self.host_conf_dir) + + def _copy_apt_dir_to(self, dst_dir): + staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True) + + bb.utils.remove(dst_dir, True) + + shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + pm.write_index() + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True)) + + self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt")) + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True)) + + self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path, + "etc", "apt")) + + native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, + "var", "lib", "dpkg") + bb.utils.mkdirhier(native_dpkg_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")): + bb.utils.movefile(f, native_dpkg_state_dir) + + bb.utils.remove(os.path.join(self.sdk_output, "var"), True) + + +def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None): + if rootfs_dir is None: + sdk_output = 
d.getVar('SDK_OUTPUT', True) + target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/') + + rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True] + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + arch_var = ["SDK_PACKAGE_ARCHS", None][target is True] + os_var = ["SDK_OS", None][target is True] + return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list(format) + elif img_type == "ipk": + conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True] + return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list(format) + elif img_type == "deb": + return DpkgPkgsList(d, rootfs_dir).list(format) + +def populate_sdk(d, manifest_dir=None): + env_bkp = os.environ.copy() + + img_type = d.getVar('IMAGE_PKGTYPE', True) + if img_type == "rpm": + RpmSdk(d, manifest_dir).populate() + elif img_type == "ipk": + OpkgSdk(d, manifest_dir).populate() + elif img_type == "deb": + DpkgSdk(d, manifest_dir).populate() + + os.environ.clear() + os.environ.update(env_bkp) + +if __name__ == "__main__": + pass diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py new file mode 100644 index 0000000000..af7617ee61 --- /dev/null +++ b/meta/lib/oe/sstatesig.py @@ -0,0 +1,276 @@ +import bb.siggen + +def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache): + # Return True if we should keep the dependency, False to drop it + def isNative(x): + return x.endswith("-native") + def isCross(x): + return "-cross-" in x + def isNativeSDK(x): + return x.startswith("nativesdk-") + def isKernel(fn): + inherits = " ".join(dataCache.inherits[fn]) + return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1 + def isPackageGroup(fn): + inherits = " ".join(dataCache.inherits[fn]) + return "/packagegroup.bbclass" in inherits + def isAllArch(fn): + inherits = " ".join(dataCache.inherits[fn]) + return "/allarch.bbclass" in inherits + def isImage(fn): + return 
"/image.bbclass" in " ".join(dataCache.inherits[fn]) + + # Always include our own inter-task dependencies + if recipename == depname: + return True + + # Quilt (patch application) changing isn't likely to affect anything + excludelist = ['quilt-native', 'subversion-native', 'git-native'] + if depname in excludelist and recipename != depname: + return False + + # Exclude well defined recipe->dependency + if "%s->%s" % (recipename, depname) in siggen.saferecipedeps: + return False + + # Don't change native/cross/nativesdk recipe dependencies any further + if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename): + return True + + # Only target packages beyond here + + # allarch packagegroups are assumed to have well behaved names which don't change between architecures/tunes + if isPackageGroup(fn) and isAllArch(fn): + return False + + # Exclude well defined machine specific configurations which don't change ABI + if depname in siggen.abisaferecipes and not isImage(fn): + return False + + # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum + # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum + # is machine specific. + # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes) + # and we reccomend a kernel-module, we exclude the dependency. 
+ depfn = dep.rsplit(".", 1)[0] + if dataCache and isKernel(depfn) and not isKernel(fn): + for pkg in dataCache.runrecs[fn]: + if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1: + return False + + # Default to keep dependencies + return True + +def sstate_lockedsigs(d): + sigs = {} + types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split() + for t in types: + lockedsigs = (d.getVar("SIGGEN_LOCKEDSIGS_%s" % t, True) or "").split() + for ls in lockedsigs: + pn, task, h = ls.split(":", 2) + if pn not in sigs: + sigs[pn] = {} + sigs[pn][task] = h + return sigs + +class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic): + name = "OEBasic" + def init_rundepcheck(self, data): + self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split() + self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split() + pass + def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None): + return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache) + +class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash): + name = "OEBasicHash" + def init_rundepcheck(self, data): + self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split() + self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split() + self.lockedsigs = sstate_lockedsigs(data) + self.lockedhashes = {} + self.lockedpnmap = {} + self.lockedhashfn = {} + self.machine = data.getVar("MACHINE", True) + self.mismatch_msgs = [] + pass + def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None): + return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache) + + def get_taskdata(self): + data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata() + return (data, self.lockedpnmap, self.lockedhashfn) + + def set_taskdata(self, data): + coredata, self.lockedpnmap, self.lockedhashfn = data + 
class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
    """Hash-based signature generator with locked-signature support.

    NOTE: the super(bb.siggen.SignatureGeneratorBasicHash, self) calls
    deliberately skip BasicHash and invoke the SignatureGeneratorBasic
    implementations.
    """
    name = "OEBasicHash"

    def init_rundepcheck(self, data):
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
        self.lockedsigs = sstate_lockedsigs(data)
        self.lockedhashes = {}
        self.lockedpnmap = {}
        self.lockedhashfn = {}
        self.machine = data.getVar("MACHINE", True)
        self.mismatch_msgs = []

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache=None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)

    def get_taskdata(self):
        data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
        return (data, self.lockedpnmap, self.lockedhashfn)

    def set_taskdata(self, data):
        coredata, self.lockedpnmap, self.lockedhashfn = data
        super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)

    def dump_sigs(self, dataCache, options):
        self.dump_lockedsigs()
        return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)

    def get_taskhash(self, fn, task, deps, dataCache):
        """Compute the task hash, overriding it with a locked sig if set."""
        h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)

        recipename = dataCache.pkg_fn[fn]
        self.lockedpnmap[fn] = recipename
        self.lockedhashfn[fn] = dataCache.hashfn[fn]
        if recipename in self.lockedsigs:
            if task in self.lockedsigs[recipename]:
                k = fn + "." + task
                h_locked = self.lockedsigs[recipename][task]
                self.lockedhashes[k] = h_locked
                self.taskhash[k] = h_locked

                # Record a mismatch for later reporting in checkhashes().
                if h != h_locked:
                    self.mismatch_msgs.append('The %s:%s sig (%s) changed, use locked sig %s to instead'
                                              % (recipename, task, h, h_locked))

                return h_locked
        return h

    def dump_sigtask(self, fn, task, stampbase, runtime):
        # Locked tasks have no real sig data to dump.
        k = fn + "." + task
        if k in self.lockedhashes:
            return
        super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)

    def dump_lockedsigs(self, sigfile=None):
        """Write the current task hashes as a locked-sigs include file."""
        if not sigfile:
            sigfile = os.getcwd() + "/locked-sigs.inc"

        bb.plain("Writing locked sigs to %s" % sigfile)
        types = {}
        for k in self.runtaskdeps:
            fn = k.rsplit(".", 1)[0]
            # Derive the type bucket from the hashfn's package arch field.
            t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
            t = 't-' + t.replace('_', '-')
            types.setdefault(t, []).append(k)

        with open(sigfile, "w") as f:
            for t in types:
                f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
                types[t].sort()
                sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".", 1)[0]])
                for k in sortedk:
                    fn = k.rsplit(".", 1)[0]
                    task = k.rsplit(".", 1)[1]
                    if k not in self.taskhash:
                        continue
                    f.write("    " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
                f.write('    "\n')
            f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(types.keys())))

    def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
        """Warn/error about locked sigs missing from the sstate cache."""
        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_CHECK_LEVEL", True)
        for task in range(len(sq_fn)):
            if task not in ret:
                for pn in self.lockedsigs:
                    if sq_hash[task] in self.lockedsigs[pn].itervalues():
                        self.mismatch_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
                                                  % (pn, sq_task[task], sq_hash[task]))

        if self.mismatch_msgs and checklevel == 'warn':
            bb.warn("\n".join(self.mismatch_msgs))
        elif self.mismatch_msgs and checklevel == 'error':
            bb.fatal("\n".join(self.mismatch_msgs))
+ % (pn, sq_task[task], sq_hash[task])) + + if self.mismatch_msgs and checklevel == 'warn': + bb.warn("\n".join(self.mismatch_msgs)) + elif self.mismatch_msgs and checklevel == 'error': + bb.fatal("\n".join(self.mismatch_msgs)) + + +# Insert these classes into siggen's namespace so it can see and select them +bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic +bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash + + +def find_siginfo(pn, taskname, taskhashlist, d): + """ Find signature data files for comparison purposes """ + + import fnmatch + import glob + + if taskhashlist: + hashfiles = {} + + if not taskname: + # We have to derive pn and taskname + key = pn + splitit = key.split('.bb.') + taskname = splitit[1] + pn = os.path.basename(splitit[0]).split('_')[0] + if key.startswith('virtual:native:'): + pn = pn + '-native' + + if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic']: + pn.replace("-native", "") + + filedates = {} + + # First search in stamps dir + localdata = d.createCopy() + localdata.setVar('MULTIMACH_TARGET_SYS', '*') + localdata.setVar('PN', pn) + localdata.setVar('PV', '*') + localdata.setVar('PR', '*') + localdata.setVar('EXTENDPE', '') + stamp = localdata.getVar('STAMP', True) + filespec = '%s.%s.sigdata.*' % (stamp, taskname) + foundall = False + import glob + for fullpath in glob.glob(filespec): + match = False + if taskhashlist: + for taskhash in taskhashlist: + if fullpath.endswith('.%s' % taskhash): + hashfiles[taskhash] = fullpath + if len(hashfiles) == len(taskhashlist): + foundall = True + break + else: + try: + filedates[fullpath] = os.stat(fullpath).st_mtime + except OSError: + continue + + if not taskhashlist or (len(filedates) < 2 and not foundall): + # That didn't work, look in sstate-cache + hashes = taskhashlist or ['*'] + localdata = bb.data.createCopy(d) + for hashval in hashes: + localdata.setVar('PACKAGE_ARCH', '*') + localdata.setVar('TARGET_VENDOR', '*') + 
localdata.setVar('TARGET_OS', '*') + localdata.setVar('PN', pn) + localdata.setVar('PV', '*') + localdata.setVar('PR', '*') + localdata.setVar('BB_TASKHASH', hashval) + if pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn: + localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/") + sstatename = taskname[3:] + filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename) + + if hashval != '*': + sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2]) + else: + sstatedir = d.getVar('SSTATE_DIR', True) + + for root, dirs, files in os.walk(sstatedir): + for fn in files: + fullpath = os.path.join(root, fn) + if fnmatch.fnmatch(fullpath, filespec): + if taskhashlist: + hashfiles[hashval] = fullpath + else: + try: + filedates[fullpath] = os.stat(fullpath).st_mtime + except: + continue + + if taskhashlist: + return hashfiles + else: + return filedates + +bb.siggen.find_siginfo = find_siginfo diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py new file mode 100644 index 0000000000..0a623c75b1 --- /dev/null +++ b/meta/lib/oe/terminal.py @@ -0,0 +1,208 @@ +import logging +import oe.classutils +import shlex +from bb.process import Popen, ExecutionError + +logger = logging.getLogger('BitBake.OE.Terminal') + + +class UnsupportedTerminal(Exception): + pass + +class NoSupportedTerminals(Exception): + pass + + +class Registry(oe.classutils.ClassRegistry): + command = None + + def __init__(cls, name, bases, attrs): + super(Registry, cls).__init__(name.lower(), bases, attrs) + + @property + def implemented(cls): + return bool(cls.command) + + +class Terminal(Popen): + __metaclass__ = Registry + + def __init__(self, sh_cmd, title=None, env=None, d=None): + fmt_sh_cmd = self.format_command(sh_cmd, title) + try: + Popen.__init__(self, fmt_sh_cmd, env=env) + except OSError as exc: + import errno + if exc.errno == errno.ENOENT: + raise UnsupportedTerminal(self.name) + else: + raise + + def format_command(self, sh_cmd, title): + 
fmt = {'title': title or 'Terminal', 'command': sh_cmd} + if isinstance(self.command, basestring): + return shlex.split(self.command.format(**fmt)) + else: + return [element.format(**fmt) for element in self.command] + +class XTerminal(Terminal): + def __init__(self, sh_cmd, title=None, env=None, d=None): + Terminal.__init__(self, sh_cmd, title, env, d) + if not os.environ.get('DISPLAY'): + raise UnsupportedTerminal(self.name) + +class Gnome(XTerminal): + command = 'gnome-terminal -t "{title}" -x {command}' + priority = 2 + +class Mate(XTerminal): + command = 'mate-terminal -t "{title}" -x {command}' + priority = 2 + +class Xfce(XTerminal): + command = 'xfce4-terminal -T "{title}" -e "{command}"' + priority = 2 + +class Konsole(XTerminal): + command = 'konsole -T "{title}" -e {command}' + priority = 2 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + # Check version + vernum = check_konsole_version("konsole") + if vernum: + if vernum.split('.')[0] == "2": + logger.debug(1, 'Konsole from KDE 4.x will not work as devshell, skipping') + raise UnsupportedTerminal(self.name) + XTerminal.__init__(self, sh_cmd, title, env, d) + +class XTerm(XTerminal): + command = 'xterm -T "{title}" -e {command}' + priority = 1 + +class Rxvt(XTerminal): + command = 'rxvt -T "{title}" -e {command}' + priority = 1 + +class Screen(Terminal): + command = 'screen -D -m -t "{title}" -S devshell {command}' + + def __init__(self, sh_cmd, title=None, env=None, d=None): + s_id = "devshell_%i" % os.getpid() + self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id + Terminal.__init__(self, sh_cmd, title, env, d) + msg = 'Screen started. 
Please connect in another terminal with ' \ + '"screen -r %s"' % s_id + if (d): + bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id, + 0.5, 10), d) + else: + logger.warn(msg) + +class TmuxRunning(Terminal): + """Open a new pane in the current running tmux window""" + name = 'tmux-running' + command = 'tmux split-window "{command}"' + priority = 2.75 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + if not bb.utils.which(os.getenv('PATH'), 'tmux'): + raise UnsupportedTerminal('tmux is not installed') + + if not os.getenv('TMUX'): + raise UnsupportedTerminal('tmux is not running') + + Terminal.__init__(self, sh_cmd, title, env, d) + +class Tmux(Terminal): + """Start a new tmux session and window""" + command = 'tmux new -d -s devshell -n devshell "{command}"' + priority = 0.75 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + if not bb.utils.which(os.getenv('PATH'), 'tmux'): + raise UnsupportedTerminal('tmux is not installed') + + # TODO: consider using a 'devshell' session shared amongst all + # devshells, if it's already there, add a new window to it. + window_name = 'devshell-%i' % os.getpid() + + self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name) + Terminal.__init__(self, sh_cmd, title, env, d) + + attach_cmd = 'tmux att -t {0}'.format(window_name) + msg = 'Tmux started. 
Please connect in another terminal with `tmux att -t {0}`'.format(window_name) + if d: + bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d) + else: + logger.warn(msg) + +class Custom(Terminal): + command = 'false' # This is a placeholder + priority = 3 + + def __init__(self, sh_cmd, title=None, env=None, d=None): + self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True) + if self.command: + if not '{command}' in self.command: + self.command += ' {command}' + Terminal.__init__(self, sh_cmd, title, env, d) + logger.warn('Custom terminal was started.') + else: + logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set') + raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set') + + +def prioritized(): + return Registry.prioritized() + +def spawn_preferred(sh_cmd, title=None, env=None, d=None): + """Spawn the first supported terminal, by priority""" + for terminal in prioritized(): + try: + spawn(terminal.name, sh_cmd, title, env, d) + break + except UnsupportedTerminal: + continue + else: + raise NoSupportedTerminals() + +def spawn(name, sh_cmd, title=None, env=None, d=None): + """Spawn the specified terminal, by name""" + logger.debug(1, 'Attempting to spawn terminal "%s"', name) + try: + terminal = Registry.registry[name] + except KeyError: + raise UnsupportedTerminal(name) + + pipe = terminal(sh_cmd, title, env, d) + output = pipe.communicate()[0] + if pipe.returncode != 0: + raise ExecutionError(sh_cmd, pipe.returncode, output) + +def check_konsole_version(konsole): + import subprocess as sub + try: + p = sub.Popen(['sh', '-c', '%s --version' % konsole],stdout=sub.PIPE,stderr=sub.PIPE) + out, err = p.communicate() + ver_info = out.rstrip().split('\n') + except OSError as exc: + import errno + if exc.errno == errno.ENOENT: + return None + else: + raise + vernum = None + for ver in ver_info: + if ver.startswith('Konsole'): + vernum = ver.split(' ')[-1] + return vernum + +def distro_name(): + try: + p = Popen(['lsb_release', '-i']) + out, 
err = p.communicate() + distro = out.split(':')[1].strip().lower() + except: + distro = "unknown" + return distro diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py new file mode 100644 index 0000000000..c388886184 --- /dev/null +++ b/meta/lib/oe/tests/test_license.py @@ -0,0 +1,68 @@ +import unittest +import oe.license + +class SeenVisitor(oe.license.LicenseVisitor): + def __init__(self): + self.seen = [] + oe.license.LicenseVisitor.__init__(self) + + def visit_Str(self, node): + self.seen.append(node.s) + +class TestSingleLicense(unittest.TestCase): + licenses = [ + "GPLv2", + "LGPL-2.0", + "Artistic", + "MIT", + "GPLv3+", + "FOO_BAR", + ] + invalid_licenses = ["GPL/BSD"] + + @staticmethod + def parse(licensestr): + visitor = SeenVisitor() + visitor.visit_string(licensestr) + return visitor.seen + + def test_single_licenses(self): + for license in self.licenses: + licenses = self.parse(license) + self.assertListEqual(licenses, [license]) + + def test_invalid_licenses(self): + for license in self.invalid_licenses: + with self.assertRaises(oe.license.InvalidLicense) as cm: + self.parse(license) + self.assertEqual(cm.exception.license, license) + +class TestSimpleCombinations(unittest.TestCase): + tests = { + "FOO&BAR": ["FOO", "BAR"], + "BAZ & MOO": ["BAZ", "MOO"], + "ALPHA|BETA": ["ALPHA"], + "BAZ&MOO|FOO": ["FOO"], + "FOO&BAR|BAZ": ["FOO", "BAR"], + } + preferred = ["ALPHA", "FOO", "BAR"] + + def test_tests(self): + def choose(a, b): + if all(lic in self.preferred for lic in b): + return b + else: + return a + + for license, expected in self.tests.items(): + licenses = oe.license.flattened_licenses(license, choose) + self.assertListEqual(licenses, expected) + +class TestComplexCombinations(TestSimpleCombinations): + tests = { + "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"], + 
"(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"], + "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"], + "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"], + } + preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"] diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py new file mode 100644 index 0000000000..3d41ce157a --- /dev/null +++ b/meta/lib/oe/tests/test_path.py @@ -0,0 +1,89 @@ +import unittest +import oe, oe.path +import tempfile +import os +import errno +import shutil + +class TestRealPath(unittest.TestCase): + DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ] + FILES = [ "etc/passwd", "b/file" ] + LINKS = [ + ( "bin", "/usr/bin", "/usr/bin" ), + ( "binX", "usr/binX", "/usr/binX" ), + ( "c", "broken", "/broken" ), + ( "etc/passwd-1", "passwd", "/etc/passwd" ), + ( "etc/passwd-2", "passwd-1", "/etc/passwd" ), + ( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ), + ( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ), + ( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ), + ( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ), + ( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ), + ( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ), + ( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ), + ( "usr/binX/prog-E", "../sbin/prog-E", None ), + ( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ), + ( "loop", "a/loop", None ), + ( "a/loop", "../loop", None ), + ( "b/test", "file/foo", "/b/file/foo" ), + ] + + LINKS_PHYS = [ + ( "./", "/", "" ), + ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ), + ] + + EXCEPTIONS = [ + ( "loop", errno.ELOOP ), + ( "b/test", errno.ENOENT ), + ] + + def __del__(self): + try: + #os.system("tree -F %s" % self.tmpdir) + shutil.rmtree(self.tmpdir) + except: + pass + + def setUp(self): + self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path") + self.root = os.path.join(self.tmpdir, "R") + + os.mkdir(os.path.join(self.tmpdir, 
"_real")) + os.symlink("_real", self.root) + + for d in self.DIRS: + os.mkdir(os.path.join(self.root, d)) + for f in self.FILES: + file(os.path.join(self.root, f), "w") + for l in self.LINKS: + os.symlink(l[1], os.path.join(self.root, l[0])) + + def __realpath(self, file, use_physdir, assume_dir = True): + return oe.path.realpath(os.path.join(self.root, file), self.root, + use_physdir, assume_dir = assume_dir) + + def test_norm(self): + for l in self.LINKS: + if l[2] == None: + continue + + target_p = self.__realpath(l[0], True) + target_l = self.__realpath(l[0], False) + + if l[2] != False: + self.assertEqual(target_p, target_l) + self.assertEqual(l[2], target_p[len(self.root):]) + + def test_phys(self): + for l in self.LINKS_PHYS: + target_p = self.__realpath(l[0], True) + target_l = self.__realpath(l[0], False) + + self.assertEqual(l[1], target_p[len(self.root):]) + self.assertEqual(l[2], target_l[len(self.root):]) + + def test_loop(self): + for e in self.EXCEPTIONS: + self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1], + self.__realpath, e[0], False, False) diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py new file mode 100644 index 0000000000..367cc30e45 --- /dev/null +++ b/meta/lib/oe/tests/test_types.py @@ -0,0 +1,62 @@ +import unittest +from oe.maketype import create, factory + +class TestTypes(unittest.TestCase): + def assertIsInstance(self, obj, cls): + return self.assertTrue(isinstance(obj, cls)) + + def assertIsNot(self, obj, other): + return self.assertFalse(obj is other) + + def assertFactoryCreated(self, value, type, **flags): + cls = factory(type) + self.assertIsNot(cls, None) + self.assertIsInstance(create(value, type, **flags), cls) + +class TestBooleanType(TestTypes): + def test_invalid(self): + self.assertRaises(ValueError, create, '', 'boolean') + self.assertRaises(ValueError, create, 'foo', 'boolean') + self.assertRaises(TypeError, create, object(), 'boolean') + + def test_true(self): + 
self.assertTrue(create('y', 'boolean')) + self.assertTrue(create('yes', 'boolean')) + self.assertTrue(create('1', 'boolean')) + self.assertTrue(create('t', 'boolean')) + self.assertTrue(create('true', 'boolean')) + self.assertTrue(create('TRUE', 'boolean')) + self.assertTrue(create('truE', 'boolean')) + + def test_false(self): + self.assertFalse(create('n', 'boolean')) + self.assertFalse(create('no', 'boolean')) + self.assertFalse(create('0', 'boolean')) + self.assertFalse(create('f', 'boolean')) + self.assertFalse(create('false', 'boolean')) + self.assertFalse(create('FALSE', 'boolean')) + self.assertFalse(create('faLse', 'boolean')) + + def test_bool_equality(self): + self.assertEqual(create('n', 'boolean'), False) + self.assertNotEqual(create('n', 'boolean'), True) + self.assertEqual(create('y', 'boolean'), True) + self.assertNotEqual(create('y', 'boolean'), False) + +class TestList(TestTypes): + def assertListEqual(self, value, valid, sep=None): + obj = create(value, 'list', separator=sep) + self.assertEqual(obj, valid) + if sep is not None: + self.assertEqual(obj.separator, sep) + self.assertEqual(str(obj), obj.separator.join(obj)) + + def test_list_nosep(self): + testlist = ['alpha', 'beta', 'theta'] + self.assertListEqual('alpha beta theta', testlist) + self.assertListEqual('alpha beta\ttheta', testlist) + self.assertListEqual('alpha', ['alpha']) + + def test_list_usersep(self): + self.assertListEqual('foo:bar', ['foo', 'bar'], ':') + self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':') diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py new file mode 100644 index 0000000000..5d9ac52e7d --- /dev/null +++ b/meta/lib/oe/tests/test_utils.py @@ -0,0 +1,51 @@ +import unittest +from oe.utils import packages_filter_out_system + +class TestPackagesFilterOutSystem(unittest.TestCase): + def test_filter(self): + """ + Test that oe.utils.packages_filter_out_system works. 
+ """ + try: + import bb + except ImportError: + self.skipTest("Cannot import bb") + + d = bb.data_smart.DataSmart() + d.setVar("PN", "foo") + + d.setVar("PACKAGES", "foo foo-doc foo-dev") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, []) + + d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, ["foo-data"]) + + d.setVar("PACKAGES", "foo foo-locale-en-gb") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, []) + + d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb") + pkgs = packages_filter_out_system(d) + self.assertEqual(pkgs, ["foo-data"]) + + +class TestTrimVersion(unittest.TestCase): + def test_version_exception(self): + with self.assertRaises(TypeError): + trim_version(None, 2) + with self.assertRaises(TypeError): + trim_version((1, 2, 3), 2) + + def test_num_exception(self): + with self.assertRaises(ValueError): + trim_version("1.2.3", 0) + with self.assertRaises(ValueError): + trim_version("1.2.3", -1) + + def test_valid(self): + self.assertEqual(trim_version("1.2.3", 1), "1") + self.assertEqual(trim_version("1.2.3", 2), "1.2") + self.assertEqual(trim_version("1.2.3", 3), "1.2.3") + self.assertEqual(trim_version("1.2.3", 4), "1.2.3") diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py new file mode 100644 index 0000000000..7f47c17d0e --- /dev/null +++ b/meta/lib/oe/types.py @@ -0,0 +1,153 @@ +import errno +import re +import os + + +class OEList(list): + """OpenEmbedded 'list' type + + Acts as an ordinary list, but is constructed from a string value and a + separator (optional), and re-joins itself when converted to a string with + str(). 
Set the variable type flag to 'list' to use this type, and the + 'separator' flag may be specified (defaulting to whitespace).""" + + name = "list" + + def __init__(self, value, separator = None): + if value is not None: + list.__init__(self, value.split(separator)) + else: + list.__init__(self) + + if separator is None: + self.separator = " " + else: + self.separator = separator + + def __str__(self): + return self.separator.join(self) + +def choice(value, choices): + """OpenEmbedded 'choice' type + + Acts as a multiple choice for the user. To use this, set the variable + type flag to 'choice', and set the 'choices' flag to a space separated + list of valid values.""" + if not isinstance(value, basestring): + raise TypeError("choice accepts a string, not '%s'" % type(value)) + + value = value.lower() + choices = choices.lower() + if value not in choices.split(): + raise ValueError("Invalid choice '%s'. Valid choices: %s" % + (value, choices)) + return value + +class NoMatch(object): + """Stub python regex pattern object which never matches anything""" + def findall(self, string, flags=0): + return None + + def finditer(self, string, flags=0): + return None + + def match(self, flags=0): + return None + + def search(self, string, flags=0): + return None + + def split(self, string, maxsplit=0): + return None + + def sub(pattern, repl, string, count=0): + return None + + def subn(pattern, repl, string, count=0): + return None + +NoMatch = NoMatch() + +def regex(value, regexflags=None): + """OpenEmbedded 'regex' type + + Acts as a regular expression, returning the pre-compiled regular + expression pattern object. To use this type, set the variable type flag + to 'regex', and optionally, set the 'regexflags' type to a space separated + list of the flags to control the regular expression matching (e.g. + FOO[regexflags] += 'ignorecase'). 
See the python documentation on the + 're' module for a list of valid flags.""" + + flagval = 0 + if regexflags: + for flag in regexflags.split(): + flag = flag.upper() + try: + flagval |= getattr(re, flag) + except AttributeError: + raise ValueError("Invalid regex flag '%s'" % flag) + + if not value: + # Let's ensure that the default behavior for an undefined or empty + # variable is to match nothing. If the user explicitly wants to match + # anything, they can match '.*' instead. + return NoMatch + + try: + return re.compile(value, flagval) + except re.error as exc: + raise ValueError("Invalid regex value '%s': %s" % + (value, exc.args[0])) + +def boolean(value): + """OpenEmbedded 'boolean' type + + Valid values for true: 'yes', 'y', 'true', 't', '1' + Valid values for false: 'no', 'n', 'false', 'f', '0' + """ + + if not isinstance(value, basestring): + raise TypeError("boolean accepts a string, not '%s'" % type(value)) + + value = value.lower() + if value in ('yes', 'y', 'true', 't', '1'): + return True + elif value in ('no', 'n', 'false', 'f', '0'): + return False + raise ValueError("Invalid boolean value '%s'" % value) + +def integer(value, numberbase=10): + """OpenEmbedded 'integer' type + + Defaults to base 10, but this can be specified using the optional + 'numberbase' flag.""" + + return int(value, int(numberbase)) + +_float = float +def float(value, fromhex='false'): + """OpenEmbedded floating point type + + To use this type, set the type flag to 'float', and optionally set the + 'fromhex' flag to a true value (obeying the same rules as for the + 'boolean' type) if the value is in base 16 rather than base 10.""" + + if boolean(fromhex): + return _float.fromhex(value) + else: + return _float(value) + +def path(value, relativeto='', normalize='true', mustexist='false'): + value = os.path.join(relativeto, value) + + if boolean(normalize): + value = os.path.normpath(value) + + if boolean(mustexist): + try: + open(value, 'r') + except IOError as exc: + if 
exc.errno == errno.ENOENT: + raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT))) + + return value diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py new file mode 100644 index 0000000000..35442568e2 --- /dev/null +++ b/meta/lib/oe/utils.py @@ -0,0 +1,182 @@ +try: + # Python 2 + import commands as cmdstatus +except ImportError: + # Python 3 + import subprocess as cmdstatus + +def read_file(filename): + try: + f = open( filename, "r" ) + except IOError as reason: + return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M: + else: + data = f.read().strip() + f.close() + return data + return None + +def ifelse(condition, iftrue = True, iffalse = False): + if condition: + return iftrue + else: + return iffalse + +def conditional(variable, checkvalue, truevalue, falsevalue, d): + if d.getVar(variable,1) == checkvalue: + return truevalue + else: + return falsevalue + +def less_or_equal(variable, checkvalue, truevalue, falsevalue, d): + if float(d.getVar(variable,1)) <= float(checkvalue): + return truevalue + else: + return falsevalue + +def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d): + result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue) + if result <= 0: + return truevalue + else: + return falsevalue + +def both_contain(variable1, variable2, checkvalue, d): + if d.getVar(variable1,1).find(checkvalue) != -1 and d.getVar(variable2,1).find(checkvalue) != -1: + return checkvalue + else: + return "" + +def prune_suffix(var, suffixes, d): + # See if var ends with any of the suffixes listed and + # remove it if found + for suffix in suffixes: + if var.endswith(suffix): + var = var.replace(suffix, "") + + prefix = d.getVar("MLPREFIX", True) + if prefix and var.startswith(prefix): + var = var.replace(prefix, "") + + return var + +def str_filter(f, str, d): + from re import match + return " ".join(filter(lambda x: match(f, x, 0), str.split())) + +def 
str_filter_out(f, str, d): + from re import match + return " ".join(filter(lambda x: not match(f, x, 0), str.split())) + +def param_bool(cfg, field, dflt = None): + """Lookup in map and convert it to a boolean; take + when this does not exist""" + value = cfg.get(field, dflt) + strvalue = str(value).lower() + if strvalue in ('yes', 'y', 'true', 't', '1'): + return True + elif strvalue in ('no', 'n', 'false', 'f', '0'): + return False + raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value)) + +def inherits(d, *classes): + """Return True if the metadata inherits any of the specified classes""" + return any(bb.data.inherits_class(cls, d) for cls in classes) + +def features_backfill(var,d): + # This construct allows the addition of new features to variable specified + # as var + # Example for var = "DISTRO_FEATURES" + # This construct allows the addition of new features to DISTRO_FEATURES + # that if not present would disable existing functionality, without + # disturbing distributions that have already set DISTRO_FEATURES. + # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should + # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED + features = (d.getVar(var, True) or "").split() + backfill = (d.getVar(var+"_BACKFILL", True) or "").split() + considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split() + + addfeatures = [] + for feature in backfill: + if feature not in features and feature not in considered: + addfeatures.append(feature) + + if addfeatures: + d.appendVar(var, " " + " ".join(addfeatures)) + + +def packages_filter_out_system(d): + """ + Return a list of packages from PACKAGES with the "system" packages such as + PN-dbg PN-doc PN-locale-eb-gb removed. 
+ """ + pn = d.getVar('PN', True) + blacklist = map(lambda suffix: pn + suffix, ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')) + localepkg = pn + "-locale-" + pkgs = [] + + for pkg in d.getVar('PACKAGES', True).split(): + if pkg not in blacklist and localepkg not in pkg: + pkgs.append(pkg) + return pkgs + +def getstatusoutput(cmd): + return cmdstatus.getstatusoutput(cmd) + + +def trim_version(version, num_parts=2): + """ + Return just the first of , split by periods. For + example, trim_version("1.2.3", 2) will return "1.2". + """ + if type(version) is not str: + raise TypeError("Version should be a string") + if num_parts < 1: + raise ValueError("Cannot split to parts < 1") + + parts = version.split(".") + trimmed = ".".join(parts[:num_parts]) + return trimmed + +def cpu_count(): + import multiprocessing + return multiprocessing.cpu_count() + +def execute_pre_post_process(d, cmds): + if cmds is None: + return + + for cmd in cmds.strip().split(';'): + cmd = cmd.strip() + if cmd != '': + bb.note("Executing %s ..." 
% cmd) + bb.build.exec_func(cmd, d) + +def multiprocess_exec(commands, function): + import signal + import multiprocessing + + if not commands: + return [] + + def init_worker(): + signal.signal(signal.SIGINT, signal.SIG_IGN) + + nproc = min(multiprocessing.cpu_count(), len(commands)) + pool = bb.utils.multiprocessingpool(nproc, init_worker) + imap = pool.imap(function, commands) + + try: + res = list(imap) + pool.close() + pool.join() + results = [] + for result in res: + if result is not None: + results.append(result) + return results + + except KeyboardInterrupt: + pool.terminate() + pool.join() + raise diff --git a/meta/lib/oeqa/__init__.py b/meta/lib/oeqa/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/meta/lib/oeqa/controllers/__init__.py b/meta/lib/oeqa/controllers/__init__.py new file mode 100644 index 0000000000..8eda92763c --- /dev/null +++ b/meta/lib/oeqa/controllers/__init__.py @@ -0,0 +1,3 @@ +# Enable other layers to have modules in the same named directory +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/controllers/masterimage.py b/meta/lib/oeqa/controllers/masterimage.py new file mode 100644 index 0000000000..311f0cf68c --- /dev/null +++ b/meta/lib/oeqa/controllers/masterimage.py @@ -0,0 +1,201 @@ +# Copyright (C) 2014 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# This module adds support to testimage.bbclass to deploy images and run +# tests using a "master image" - this is a "known good" image that is +# installed onto the device as part of initial setup and will be booted into +# with no interaction; we can then use it to deploy the image to be tested +# to a second partition before running the tests. 
+# +# For an example master image, see core-image-testmaster +# (meta/recipes-extended/images/core-image-testmaster.bb) + +import os +import bb +import traceback +import time +import subprocess + +import oeqa.targetcontrol +import oeqa.utils.sshcontrol as sshcontrol +import oeqa.utils.commands as commands +from oeqa.utils import CommandError + +from abc import ABCMeta, abstractmethod + +class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget): + + __metaclass__ = ABCMeta + + supported_image_fstypes = ['tar.gz', 'tar.bz2'] + + def __init__(self, d): + super(MasterImageHardwareTarget, self).__init__(d) + + # target ip + addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.') + self.ip = addr.split(":")[0] + try: + self.port = addr.split(":")[1] + except IndexError: + self.port = None + bb.note("Target IP: %s" % self.ip) + self.server_ip = d.getVar("TEST_SERVER_IP", True) + if not self.server_ip: + try: + self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1] + except Exception as e: + bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e) + bb.note("Server IP: %s" % self.server_ip) + + # test rootfs + kernel + self.image_fstype = self.get_image_fstype(d) + self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.' 
+ self.image_fstype) + self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("KERNEL_IMAGETYPE") + '-' + d.getVar('MACHINE') + '.bin') + if not os.path.isfile(self.rootfs): + # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be + # the same as the config with which the image was build, ie + # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz" + # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage + bb.fatal("No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \ + \nExpected path: %s" % self.rootfs) + if not os.path.isfile(self.kernel): + bb.fatal("No kernel found. Expected path: %s" % self.kernel) + + # master ssh connection + self.master = None + # if the user knows what they are doing, then by all means... + self.user_cmds = d.getVar("TEST_DEPLOY_CMDS", True) + self.deploy_cmds = None + + # this is the name of the command that controls the power for a board + # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants" + # the command should take as the last argument "off" and "on" and "cycle" (off, on) + self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None + self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or "" + + self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD", True) or None + self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or "" + + self.origenv = os.environ + if self.powercontrol_cmd or self.serialcontrol_cmd: + # the external script for controlling power might use ssh + # ssh + keys means we need the original user env + bborigenv = d.getVar("BB_ORIGENV", False) or {} + for key in bborigenv: + val = bborigenv.getVar(key, True) + if val is not None: + self.origenv[key] = str(val) + + if self.powercontrol_cmd: + if self.powercontrol_args: + 
self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args) + if self.serialcontrol_cmd: + if self.serialcontrol_args: + self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args) + + def power_ctl(self, msg): + if self.powercontrol_cmd: + cmd = "%s %s" % (self.powercontrol_cmd, msg) + try: + commands.runCmd(cmd, assert_error=False, preexec_fn=os.setsid, env=self.origenv) + except CommandError as e: + bb.fatal(str(e)) + + def power_cycle(self, conn): + if self.powercontrol_cmd: + # be nice, don't just cut power + conn.run("shutdown -h now") + time.sleep(10) + self.power_ctl("cycle") + else: + status, output = conn.run("reboot") + if status != 0: + bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output) + + def _wait_until_booted(self): + ''' Waits until the target device has booted (if we have just power cycled it) ''' + # Subclasses with better methods of determining boot can override this + time.sleep(120) + + def deploy(self): + # base class just sets the ssh log file for us + super(MasterImageHardwareTarget, self).deploy() + self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port) + status, output = self.master.run("cat /etc/masterimage") + if status != 0: + # We're not booted into the master image, so try rebooting + bb.plain("%s - booting into the master image" % self.pn) + self.power_ctl("cycle") + self._wait_until_booted() + + bb.plain("%s - deploying image on target" % self.pn) + status, output = self.master.run("cat /etc/masterimage") + if status != 0: + bb.fatal("No ssh connectivity or target isn't running a master image.\n%s" % output) + if self.user_cmds: + self.deploy_cmds = self.user_cmds.split("\n") + try: + self._deploy() + except Exception as e: + bb.fatal("Failed deploying test image: %s" % e) + + @abstractmethod + def _deploy(self): + pass + + def start(self, params=None): + bb.plain("%s - 
boot test image on target" % self.pn) + self._start() + # set the ssh object for the target/test image + self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port) + bb.plain("%s - start running tests" % self.pn) + + @abstractmethod + def _start(self): + pass + + def stop(self): + bb.plain("%s - reboot/powercycle target" % self.pn) + self.power_cycle(self.connection) + + +class GummibootTarget(MasterImageHardwareTarget): + + def __init__(self, d): + super(GummibootTarget, self).__init__(d) + # this the value we need to set in the LoaderEntryOneShot EFI variable + # so the system boots the 'test' bootloader label and not the default + # The first four bytes are EFI bits, and the rest is an utf-16le string + # (EFI vars values need to be utf-16) + # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C + # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...| + self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00' + self.deploy_cmds = [ + 'mount -L boot /boot', + 'mkdir -p /mnt/testrootfs', + 'mount -L testrootfs /mnt/testrootfs', + 'modprobe efivarfs', + 'mount -t efivarfs efivarfs /sys/firmware/efi/efivars', + 'cp ~/test-kernel /boot', + 'rm -rf /mnt/testrootfs/*', + 'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype, + 'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue + ] + + def _deploy(self): + # make sure these aren't mounted + self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;") + # from now on, every deploy cmd should return 0 + # else an exception will be thrown by sshcontrol + self.master.ignore_status = False + self.master.copy_to(self.rootfs, "~/test-rootfs." 
+ self.image_fstype) + self.master.copy_to(self.kernel, "~/test-kernel") + for cmd in self.deploy_cmds: + self.master.run(cmd) + + def _start(self, params=None): + self.power_cycle(self.master) + # there are better ways than a timeout but this should work for now + time.sleep(120) diff --git a/meta/lib/oeqa/controllers/testtargetloader.py b/meta/lib/oeqa/controllers/testtargetloader.py new file mode 100644 index 0000000000..a1b7b1d92b --- /dev/null +++ b/meta/lib/oeqa/controllers/testtargetloader.py @@ -0,0 +1,70 @@ +import types +import bb +import os + +# This class is responsible for loading a test target controller +class TestTargetLoader: + + # Search oeqa.controllers module directory for and return a controller + # corresponding to the given target name. + # AttributeError raised if not found. + # ImportError raised if a provided module can not be imported. + def get_controller_module(self, target, bbpath): + controllerslist = self.get_controller_modulenames(bbpath) + bb.note("Available controller modules: %s" % str(controllerslist)) + controller = self.load_controller_from_name(target, controllerslist) + return controller + + # Return a list of all python modules in lib/oeqa/controllers for each + # layer in bbpath + def get_controller_modulenames(self, bbpath): + + controllerslist = [] + + def add_controller_list(path): + if not os.path.exists(os.path.join(path, '__init__.py')): + bb.fatal('Controllers directory %s exists but is missing __init__.py' % path) + files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')]) + for f in files: + module = 'oeqa.controllers.' + f[:-3] + if module not in controllerslist: + controllerslist.append(module) + else: + bb.warn("Duplicate controller module found for %s, only one added. 
Layers should create unique controller module names" % module) + + for p in bbpath: + controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers') + bb.debug(2, 'Searching for target controllers in %s' % controllerpath) + if os.path.exists(controllerpath): + add_controller_list(controllerpath) + return controllerslist + + # Search for and return a controller from given target name and + # set of module names. + # Raise AttributeError if not found. + # Raise ImportError if a provided module can not be imported + def load_controller_from_name(self, target, modulenames): + for name in modulenames: + obj = self.load_controller_from_module(target, name) + if obj: + return obj + raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames))) + + # Search for and return a controller or None from given module name + def load_controller_from_module(self, target, modulename): + obj = None + # import module, allowing it to raise import exception + module = __import__(modulename, globals(), locals(), [target]) + # look for target class in the module, catching any exceptions as it + # is valid that a module may not have the target class. + try: + obj = getattr(module, target) + if obj: + from oeqa.targetcontrol import BaseTarget + if (not isinstance(obj, (type, types.ClassType))): + bb.warn("Target {0} found, but not of type Class".format(target)) + if( not issubclass(obj, BaseTarget)): + bb.warn("Target {0} found, but subclass is not BaseTarget".format(target)) + except: + obj = None + return obj diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py new file mode 100644 index 0000000000..0b7e7dc42d --- /dev/null +++ b/meta/lib/oeqa/oetest.py @@ -0,0 +1,106 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# Main unittest module used by testimage.bbclass +# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime. 
+ +# It also has some helper functions and it's responsible for actually starting the tests + +import os, re, mmap +import unittest +import inspect +import subprocess +from oeqa.utils.decorators import LogResults + +def loadTests(tc, type="runtime"): + if type == "runtime": + # set the context object passed from the test class + setattr(oeTest, "tc", tc) + # set ps command to use + setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeTest.hasPackage("procps") else "ps") + # prepare test suite, loader and runner + suite = unittest.TestSuite() + elif type == "sdk": + # set the context object passed from the test class + setattr(oeTest, "tc", tc) + testloader = unittest.TestLoader() + testloader.sortTestMethodsUsing = None + suite = testloader.loadTestsFromNames(tc.testslist) + + return suite + +def runTests(tc, type="runtime"): + + suite = loadTests(tc, type) + print("Test modules %s" % tc.testslist) + print("Found %s tests" % suite.countTestCases()) + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + + return result + +@LogResults +class oeTest(unittest.TestCase): + + longMessage = True + + @classmethod + def hasPackage(self, pkg): + + if re.search(pkg, oeTest.tc.pkgmanifest): + return True + return False + + @classmethod + def hasFeature(self,feature): + + if feature in oeTest.tc.imagefeatures or \ + feature in oeTest.tc.distrofeatures: + return True + else: + return False + +class oeRuntimeTest(oeTest): + def __init__(self, methodName='runTest'): + self.target = oeRuntimeTest.tc.target + super(oeRuntimeTest, self).__init__(methodName) + + #TODO: use package_manager.py to install packages on any type of image + def install_packages(self, packagelist): + for package in packagelist: + (status, result) = self.target.run("smart install -y "+package) + if status != 0: + return status + +class oeSDKTest(oeTest): + def __init__(self, methodName='runTest'): + self.sdktestdir = oeSDKTest.tc.sdktestdir + super(oeSDKTest, self).__init__(methodName) + +def 
getmodule(pos=2): + # stack returns a list of tuples containg frame information + # First element of the list the is current frame, caller is 1 + frameinfo = inspect.stack()[pos] + modname = inspect.getmodulename(frameinfo[1]) + #modname = inspect.getmodule(frameinfo[0]).__name__ + return modname + +def skipModule(reason, pos=2): + modname = getmodule(pos) + if modname not in oeTest.tc.testsrequired: + raise unittest.SkipTest("%s: %s" % (modname, reason)) + else: + raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \ + "\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \ + "\nor the image really doesn't have the required feature/package when it should." % (modname, reason)) + +def skipModuleIf(cond, reason): + + if cond: + skipModule(reason, 3) + +def skipModuleUnless(cond, reason): + + if not cond: + skipModule(reason, 3) diff --git a/meta/lib/oeqa/runexported.py b/meta/lib/oeqa/runexported.py new file mode 100755 index 0000000000..e1b6642ec2 --- /dev/null +++ b/meta/lib/oeqa/runexported.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python + + +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# This script should be used outside of the build system to run image tests. +# It needs a json file as input as exported by the build. 
+# E.g for an already built image: +#- export the tests: +# TEST_EXPORT_ONLY = "1" +# TEST_TARGET = "simpleremote" +# TEST_TARGET_IP = "192.168.7.2" +# TEST_SERVER_IP = "192.168.7.1" +# bitbake core-image-sato -c testimage +# Setup your target, e.g for qemu: runqemu core-image-sato +# cd build/tmp/testimage/core-image-sato +# ./runexported.py testdata.json + +import sys +import os +import time +from optparse import OptionParser + +try: + import simplejson as json +except ImportError: + import json + +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa"))) + +from oeqa.oetest import runTests +from oeqa.utils.sshcontrol import SSHControl + +# this isn't pretty but we need a fake target object +# for running the tests externally as we don't care +# about deploy/start we only care about the connection methods (run, copy) +class FakeTarget(object): + def __init__(self, d): + self.connection = None + self.ip = None + self.server_ip = None + self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime()) + self.testdir = d.getVar("TEST_LOG_DIR", True) + self.pn = d.getVar("PN", True) + + def exportStart(self): + self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime) + sshloglink = os.path.join(self.testdir, "ssh_target_log") + if os.path.islink(sshloglink): + os.unlink(sshloglink) + os.symlink(self.sshlog, sshloglink) + print("SSH log file: %s" % self.sshlog) + self.connection = SSHControl(self.ip, logfile=self.sshlog) + + def run(self, cmd, timeout=None): + return self.connection.run(cmd, timeout) + + def copy_to(self, localpath, remotepath): + return self.connection.copy_to(localpath, remotepath) + + def copy_from(self, remotepath, localpath): + return self.connection.copy_from(remotepath, localpath) + + +class MyDataDict(dict): + def getVar(self, key, unused = None): + return self.get(key, "") + +class TestContext(object): + def __init__(self): + self.d = None + self.target = None + +def main(): + + usage = "usage: %prog 
[options] <json file>"
The one and only argument should be a json file exported by the build system") + + with open(args[0], "r") as f: + loaded = json.load(f) + + if options.ip: + loaded["target"]["ip"] = options.ip + if options.server_ip: + loaded["target"]["server_ip"] = options.server_ip + + d = MyDataDict() + for key in loaded["d"].keys(): + d[key] = loaded["d"][key] + + if options.log_dir: + d["TEST_LOG_DIR"] = options.log_dir + else: + d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__)) + if options.deploy_dir: + d["DEPLOY_DIR"] = options.deploy_dir + else: + if not os.path.isdir(d["DEPLOY_DIR"]): + raise Exception("The path to DEPLOY_DIR does not exists: %s" % d["DEPLOY_DIR"]) + + + target = FakeTarget(d) + for key in loaded["target"].keys(): + setattr(target, key, loaded["target"][key]) + + tc = TestContext() + setattr(tc, "d", d) + setattr(tc, "target", target) + for key in loaded.keys(): + if key != "d" and key != "target": + setattr(tc, key, loaded[key]) + + target.exportStart() + runTests(tc) + + return 0 + +if __name__ == "__main__": + try: + ret = main() + except Exception: + ret = 1 + import traceback + traceback.print_exc(5) + sys.exit(ret) diff --git a/meta/lib/oeqa/runtime/__init__.py b/meta/lib/oeqa/runtime/__init__.py new file mode 100644 index 0000000000..4cf3fa76b6 --- /dev/null +++ b/meta/lib/oeqa/runtime/__init__.py @@ -0,0 +1,3 @@ +# Enable other layers to have tests in the same named directory +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/runtime/_ptest.py b/meta/lib/oeqa/runtime/_ptest.py new file mode 100644 index 0000000000..4c58dc1d7f --- /dev/null +++ b/meta/lib/oeqa/runtime/_ptest.py @@ -0,0 +1,124 @@ +import unittest, os, shutil +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * +from oeqa.utils.logparser import * +from oeqa.utils.httpserver import HTTPService +import bb +import glob +from oe.package_manager import RpmPkgsList +import subprocess + 
def setUpModule():
    """Skip the whole module unless the image uses smart with rpm packaging."""
    if not oeRuntimeTest.hasFeature("package-management"):
        skipModule("Image doesn't have package management feature")
    if not oeRuntimeTest.hasPackage("smart"):
        skipModule("Image doesn't have smart installed")
    if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
        skipModule("Rpm is not the primary package manager")

class PtestRunnerTest(oeRuntimeTest):

    # a ptest log parser
    def parse_ptest(self, logfile):
        """Parse a ptest-runner log into a Result object.

        BEGIN/END markers name the package whose ptest is running; PASS/FAIL
        lines are stored under the currently open section.
        """
        parser = Lparser(test_0_pass_regex="^PASS:(.+)",
                         test_0_fail_regex="^FAIL:(.+)",
                         section_0_begin_regex="^BEGIN: .*/(.+)/ptest",
                         section_0_end_regex="^END: .*/(.+)/ptest")
        parser.init()
        result = Result()
        # Guard against PASS/FAIL lines appearing before any BEGIN marker.
        current_section = None

        with open(logfile) as f:
            for line in f:
                # Parse each line exactly once (the original re-parsed every
                # matching line a second time).
                result_tuple = parser.parse_line(line)
                if not result_tuple:
                    continue
                line_type, category, status, name = result_tuple

                if line_type == 'section' and status == 'begin':
                    current_section = name
                    continue

                if line_type == 'section' and status == 'end':
                    current_section = None
                    continue

                if line_type == 'test' and status in ('pass', 'fail'):
                    result.store(current_section, name, status)
                    continue

        result.sort_tests()
        return result

    @classmethod
    def setUpClass(self):
        # Serve DEPLOY_DIR over HTTP so the target can install packages from it.
        self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True),
                                       oeRuntimeTest.tc.target.server_ip)
        self.repo_server.start()

    @classmethod
    def tearDownClass(self):
        self.repo_server.stop()

    def add_smart_channel(self):
        """Register each per-arch rpm-md repo from the HTTP server as a smart channel."""
        image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
        deploy_url = 'http://%s:%s/%s' % (self.target.server_ip, self.repo_server.port, image_pkgtype)
        pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-", "_").split()
        for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
            # Only advertise repos for arches this image can actually use.
            if arch in pkgarchs:
                self.target.run('smart channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url), 0)
        self.target.run('smart update', 0)

    def install_complementary(self, globs=None):
        """Return the complementary packages (e.g. *-ptest) for the installed set.

        Writes the installed-package list into WORKDIR and expands *globs*
        against it with oe-pkgdata-util.
        """
        installed_pkgs_file = os.path.join(oeRuntimeTest.tc.d.getVar('WORKDIR', True),
                                           "installed_pkgs.txt")
        self.pkgs_list = RpmPkgsList(oeRuntimeTest.tc.d,
                                     oeRuntimeTest.tc.d.getVar('IMAGE_ROOTFS', True),
                                     oeRuntimeTest.tc.d.getVar('arch_var', True),
                                     oeRuntimeTest.tc.d.getVar('os_var', True))
        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.pkgs_list.list("arch"))

        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
               "glob", oeRuntimeTest.tc.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
               globs]
        try:
            bb.note("Installing complementary packages ...")
            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not compute complementary packages list. Command "
                     "'%s' returned %d:\n%s" %
                     (' '.join(cmd), e.returncode, e.output))

        return complementary_pkgs.split()

    def setUp(self):
        self.buildhist_dir = oeRuntimeTest.tc.d.getVar("BUILDHISTORY_DIR_IMAGE", True)
        self.assertTrue(os.path.exists(self.buildhist_dir))
        self.ptest_log = os.path.join(oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True),
                                      "ptest-%s.log" % oeRuntimeTest.tc.d.getVar('DATETIME', True))

    @skipUnlessPassed('test_ssh')
    def test_ptestrunner(self):
        self.add_smart_channel()
        # Install ptest packages only when the image doesn't already ship them.
        cond = oeRuntimeTest.hasPackage("ptest-runner") and oeRuntimeTest.hasFeature("ptest") and oeRuntimeTest.hasPackage("-ptest")
        if not cond:
            self.install_packages(self.install_complementary("*-ptest"))
            self.install_packages(['ptest-runner'])

        self.target.run('/usr/bin/ptest-runner > /tmp/ptest.log 2>&1', 0)
        self.target.copy_from('/tmp/ptest.log', self.ptest_log)
        shutil.copyfile(self.ptest_log, os.path.join(self.buildhist_dir, "ptest.log"))

        result = self.parse_ptest(os.path.join(self.buildhist_dir, "ptest.log"))
        log_results_to_location = "./results"
        if os.path.exists(log_results_to_location):
            shutil.rmtree(log_results_to_location)
        os.makedirs(log_results_to_location)

        result.log_as_files(log_results_to_location, test_status=['fail'])
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
from oeqa.utils.targetbuild import TargetBuildProject

def setUpModule():
    # Building on the target requires the toolchain shipped by tools-sdk.
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")

class BuildIptablesTest(oeRuntimeTest):
    """Fetch, configure, build and install iptables from source on the target."""

    @classmethod
    def setUpClass(self):
        self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
                        "http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
        self.project.download_archive()

    @testcase(206)
    @skipUnlessPassed("test_ssh")
    def test_iptables(self):
        # Run each build stage in order; every stage must exit 0.
        stages = (("configure", self.project.run_configure),
                  ("make", self.project.run_make),
                  ("make install", self.project.run_install))
        for label, run_stage in stages:
            self.assertEqual(run_stage(), 0, msg="Running %s failed" % label)

    @classmethod
    def tearDownClass(self):
        # Remove the build tree from the target.
        self.project.clean()
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *

def setUpModule():
    if not oeRuntimeTest.hasPackage("connman"):
        skipModule("No connman package in image")


class ConnmanTest(oeRuntimeTest):
    """Basic sanity checks for the connman network manager."""

    def service_status(self, service):
        # systemd images can report detailed service status; sysvinit cannot.
        if not oeRuntimeTest.hasFeature("systemd"):
            return "Unable to get status or logs for %s" % service
        (status, output) = self.target.run('systemctl status -l %s' % service)
        return output

    @skipUnlessPassed('test_ssh')
    def test_connmand_help(self):
        (status, output) = self.target.run('/usr/sbin/connmand --help')
        self.assertEqual(status, 0, msg="status and output: %s and %s" % (status, output))

    @testcase(221)
    @skipUnlessPassed('test_connmand_help')
    def test_connmand_running(self):
        # The [c]onnmand trick keeps grep from matching its own process entry.
        (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand')
        if status != 0:
            print(self.service_status("connman"))
            self.fail("No connmand process running")
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *
import re

class DateTest(oeRuntimeTest):
    """Verify the date can be set and read back on the target."""

    @testcase(211)
    @skipUnlessPassed("test_ssh")
    def test_date(self):
        # Remember the current date so it can be restored afterwards.
        (status, output) = self.target.run('date +"%Y-%m-%d %T"')
        self.assertEqual(status, 0, msg="Failed to get initial date, output: %s" % output)
        saved_date = output

        # Set a fixed, known date and confirm it took effect.
        sampleDate = '"2016-08-09 10:00:00"'
        (status, output) = self.target.run("date -s %s" % sampleDate)
        self.assertEqual(status, 0, msg="Date set failed, output: %s" % output)

        (status, output) = self.target.run("date -R")
        matched = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
        self.assertTrue(matched, msg="The date was not set correctly, output: %s" % output)

        # Restore the original date.
        (status, output) = self.target.run('date -s "%s"' % saved_date)
        self.assertEqual(status, 0, msg="Failed to reset date, output: %s" % output)
import unittest
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *


class DmesgTest(oeRuntimeTest):
    # Checks that the kernel log contains no unexpected error messages.

    @testcase(215)
    @skipUnlessPassed('test_ssh')
    def test_dmesg(self):
        # Scan dmesg for "error" (case-insensitive) while filtering out
        # known-benign noise: pl18x MMC probe messages, interface rename
        # warnings, DMA timeouts and usbhid probes. grep exits with 1 when
        # nothing matched, i.e. a clean log is the passing case.
        (status, output) = self.target.run('dmesg | grep -v mmci-pl18x | grep -v "error changing net interface name" | grep -iv "dma timeout" | grep -v usbhid | grep -i error')
        self.assertEqual(status, 1, msg = "Error messages in dmesg log: %s" % output)
import unittest
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *

def setUpModule():
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")


class GccCompileTest(oeRuntimeTest):
    """Compile C/C++ sources on the target with gcc, g++ and make."""

    @classmethod
    def setUpClass(self):
        # Stage the test sources and makefile in /tmp on the target.
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.c"), "/tmp/test.c")
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "testmakefile"), "/tmp/testmakefile")
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.cpp"), "/tmp/test.cpp")

    @testcase(203)
    def test_gcc_compile(self):
        (status, output) = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
        self.assertEqual(status, 0, msg="gcc compile failed, output: %s" % output)
        (status, output) = self.target.run('/tmp/test')
        self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)

    @testcase(200)
    def test_gpp_compile(self):
        # Deliberately feeds the C source to g++ to check it handles C input.
        (status, output) = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
        self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
        (status, output) = self.target.run('/tmp/test')
        self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)

    def test_gpp2_compile(self):
        (status, output) = self.target.run('g++ /tmp/test.cpp -o /tmp/test -lm')
        self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
        (status, output) = self.target.run('/tmp/test')
        self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)

    @testcase(204)
    def test_make(self):
        (status, output) = self.target.run('cd /tmp; make -f testmakefile')
        self.assertEqual(status, 0, msg="running make failed, output %s" % output)

    @classmethod
    def tearDownClass(self):
        # Fix: also remove /tmp/test.cpp, which setUpClass copied but the
        # original cleanup forgot to delete.
        oeRuntimeTest.tc.target.run("rm /tmp/test.c /tmp/test.cpp /tmp/test.o /tmp/test /tmp/testmakefile")
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *

def setUpModule():
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")

class LddTest(oeRuntimeTest):
    """Check that ldd is present and its RTLDLIST points at real loaders."""

    @skipUnlessPassed('test_ssh')
    def test_ldd_exists(self):
        (rc, out) = self.target.run('which ldd')
        self.assertEqual(rc, 0, msg = "ldd does not exist in PATH: which ldd: %s" % out)

    @testcase(239)
    @skipUnlessPassed('test_ldd_exists')
    def test_ldd_rtldlist_check(self):
        # Extract RTLDLIST from the ldd script and verify at least one of the
        # listed dynamic loaders exists on the filesystem.
        (rc, out) = self.target.run('for i in $(which ldd | xargs cat | grep "^RTLDLIST"|cut -d\'=\' -f2|tr -d \'"\'); do test -f $i && echo $i && break; done')
        self.assertEqual(rc, 0, msg = "ldd path not correct or RTLDLIST files don't exist. ")
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase
# Note that the image under test must have logrotate installed

import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *

def setUpModule():
    if not oeRuntimeTest.hasPackage("logrotate"):
        skipModule("No logrotate package in image")


class LogrotateTest(oeRuntimeTest):
    """Configure an olddir for wtmp and verify a forced rotation populates it."""

    @skipUnlessPassed("test_ssh")
    def test_1_logrotate_setup(self):
        # Create the rotation target directory, then point the wtmp stanza at it.
        (rc, out) = self.target.run('mkdir /home/root/logrotate_dir')
        self.assertEqual(rc, 0, msg = "Could not create logrotate_dir. Output: %s" % out)
        (rc, out) = self.target.run("sed -i 's#wtmp {#wtmp {\\n    olddir /home/root/logrotate_dir#' /etc/logrotate.conf")
        self.assertEqual(rc, 0, msg = "Could not write to logrotate.conf file. Status and output: %s and %s)" % (rc, out))

    @testcase(289)
    @skipUnlessPassed("test_1_logrotate_setup")
    def test_2_logrotate(self):
        (rc, out) = self.target.run('logrotate -f /etc/logrotate.conf')
        self.assertEqual(rc, 0, msg = "logrotate service could not be reloaded. Status and output: %s and %s" % (rc, out))
        # ls -la prints a "total" line plus "." and ".." — three or more lines
        # means at least one rotated file landed in the directory.
        out = self.target.run('ls -la /home/root/logrotate_dir/ | wc -l')[1]
        self.assertTrue(int(out) >= 3, msg = "new logfile could not be created. List of files within log directory: %s" % (self.target.run('ls -la /home/root/logrotate_dir')[1]))
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *

def setUpModule():
    # Only meaningful on images built with a lib32 multilib configuration.
    multilibs = oeRuntimeTest.tc.d.getVar("MULTILIBS", True) or ""
    if "multilib:lib32" not in multilibs:
        skipModule("this isn't a multilib:lib32 image")


class MultilibTest(oeRuntimeTest):
    """Verify that a lib32 application is really a 32-bit ELF binary."""

    @testcase('279')
    @skipUnlessPassed('test_ssh')
    def test_file_connman(self):
        self.assertTrue(oeRuntimeTest.hasPackage('connman-gnome'), msg="This test assumes connman-gnome is installed")
        # Field 2 of readelf -h line 3 is the ELF class (ELF32/ELF64).
        (rc, elf_class) = self.target.run("readelf -h /usr/bin/connman-applet | sed -n '3p' | awk '{print $2}'")
        self.assertEqual(elf_class, "ELF32", msg="connman-applet isn't an ELF32 binary. readelf says: %s" % self.target.run("readelf -h /usr/bin/connman-applet")[1])
import os
import unittest
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import *

#in the future these lists could be moved outside of module
errors = ["error", "cannot", "can\'t", "failed"]

common_errors = [
    '(WW) warning, (EE) error, (NI) not implemented, (??) unknown.',
    'dma timeout',
    'can\'t add hid device:',
    'usbhid: probe of ',
    ]

x86_common = [
    '[drm:psb_do_init] *ERROR* Debug is',
    'wrong ELF class',
    'Could not enable PowerButton event',
    'probe of LNXPWRBN:00 failed with error -22',
] + common_errors

qemux86_common = [
    'Fast TSC calibration',
    '_OSC failed (AE_NOT_FOUND); disabling ASPM',
    'Open ACPI failed (/var/run/acpid.socket) (No such file or directory)',
    'Failed to load module "vesa"',
    'Failed to load module "modesetting"',
    'Failed to load module "glx"',
    'wrong ELF class',
] + common_errors

ignore_errors = {
    'default' : common_errors,
    'qemux86' : [
        'Failed to access perfctr msr (MSR c1 is 0)',
        "fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
        ] + qemux86_common,
    'qemux86-64' : qemux86_common,
    'qemumips' : [
        'Failed to load module "glx"',
        ] + common_errors,
    'qemuppc' : [
        'PCI 0000:00 Cannot reserve Legacy IO [io 0x0000-0x0fff]',
        'mode "640x480" test failed',
        'Failed to load module "glx"',
        ] + common_errors,
    'qemuarm' : [
        'mmci-pl18x: probe of fpga:05 failed with error -22',
        'mmci-pl18x: probe of fpga:0b failed with error -22',
        'Failed to load module "glx"'
        ] + common_errors,
    'emenlow' : x86_common,
    'crownbay' : x86_common,
    'genericx86' : x86_common,
    'genericx86-64' : x86_common,
}

log_locations = ["/var/log/","/var/log/dmesg", "/tmp/dmesg_output.log"]

class ParseLogsTest(oeRuntimeTest):
    """Scan the target's logs for error messages not on the ignore list."""

    @classmethod
    def setUpClass(self):
        self.errors = errors
        self.ignore_errors = ignore_errors
        self.log_locations = log_locations
        self.msg = ""

    def getMachine(self):
        # The hostname is used as the key into the per-machine ignore lists.
        (status, output) = self.target.run("uname -n")
        return output

    #get some information on the CPU of the machine to display at the beginning of the output. This info might be useful in some cases.
    def getHardwareInfo(self):
        hwi = ""
        (status, cpu_name) = self.target.run("cat /proc/cpuinfo | grep \"model name\" | head -n1 | awk 'BEGIN{FS=\":\"}{print $2}'")
        (status, cpu_physical_cores) = self.target.run("cat /proc/cpuinfo | grep \"cpu cores\" | head -n1 | awk {'print $4'}")
        (status, cpu_logical_cores) = self.target.run("cat /proc/cpuinfo | grep \"processor\" | wc -l")
        (status, cpu_arch) = self.target.run("uname -m")
        hwi += "Machine information: \n"
        hwi += "*******************************\n"
        hwi += "Machine name: "+self.getMachine()+"\n"
        hwi += "CPU: "+str(cpu_name)+"\n"
        hwi += "Arch: "+str(cpu_arch)+"\n"
        hwi += "Physical cores: "+str(cpu_physical_cores)+"\n"
        hwi += "Logical cores: "+str(cpu_logical_cores)+"\n"
        hwi += "*******************************\n"
        return hwi

    #go through the log locations provided and if it's a folder create a list with all the .log files in it, if it's a file just add
    #it to that list
    def getLogList(self, log_locations):
        logs = []
        for location in log_locations:
            (status, output) = self.target.run("test -f "+str(location))
            if (status == 0):
                logs.append(str(location))
            else:
                (status, output) = self.target.run("test -d "+str(location))
                if (status == 0):
                    (status, output) = self.target.run("find "+str(location)+"/*.log -maxdepth 1 -type f")
                    output = output.splitlines()
                    for logfile in output:
                        logs.append(os.path.join(location,str(logfile)))
        return logs

    #build the grep command to be used with filters and exclusions
    def build_grepcmd(self, errors, ignore_errors, log):
        grepcmd = "grep "
        grepcmd +="-Ei \""
        for error in errors:
            grepcmd += error+"|"
        grepcmd = grepcmd[:-1]
        grepcmd += "\" "+str(log)+" | grep -Eiv \'"
        try:
            errorlist = ignore_errors[self.getMachine()]
        except KeyError:
            self.msg += "No ignore list found for this machine, using default\n"
            errorlist = ignore_errors['default']
        for ignore_error in errorlist:
            # Escape regex metacharacters so ignore entries match literally.
            ignore_error = ignore_error.replace("(", "\(")
            ignore_error = ignore_error.replace(")", "\)")
            ignore_error = ignore_error.replace("'", ".")
            ignore_error = ignore_error.replace("?", "\?")
            ignore_error = ignore_error.replace("[", "\[")
            ignore_error = ignore_error.replace("]", "\]")
            ignore_error = ignore_error.replace("*", "\*")
            grepcmd += ignore_error+"|"
        grepcmd = grepcmd[:-1]
        grepcmd += "\'"
        return grepcmd

    #grep only the errors so that their context could be collected. Default context is 10 lines before and after the error itself
    def parse_logs(self, errors, ignore_errors, logs, lines_before = 10, lines_after = 10):
        results = {}
        for log in logs:
            thegrep = self.build_grepcmd(errors, ignore_errors, log)
            # Fix: initialize per log so a failed run() can neither leave
            # 'result' unbound nor leak the previous log's matches.
            result = None
            try:
                (status, result) = self.target.run(thegrep)
            except Exception:
                pass
            if result:
                results[log] = {}
                for xrez in result.splitlines():
                    command = "grep \"\\"+str(xrez)+"\" -B "+str(lines_before)+" -A "+str(lines_after)+" "+str(log)
                    yrez = ""
                    try:
                        (status, yrez) = self.target.run(command)
                    except Exception:
                        pass
                    results[log][xrez]=yrez
        return results

    #get the output of dmesg and write it in a file. This file is added to log_locations.
    def write_dmesg(self):
        (status, dmesg) = self.target.run("dmesg")
        (status, dmesg2) = self.target.run("echo \""+str(dmesg)+"\" > /tmp/dmesg_output.log")

    @skipUnlessPassed('test_ssh')
    def test_parselogs(self):
        self.write_dmesg()
        log_list = self.getLogList(self.log_locations)
        result = self.parse_logs(self.errors, self.ignore_errors, log_list)
        print(self.getHardwareInfo())
        errcount = 0
        for log in result:
            self.msg += "Log: "+log+"\n"
            self.msg += "-----------------------\n"
            for error in result[log]:
                errcount += 1
                self.msg += "Central error: "+str(error)+"\n"
                self.msg += "***********************\n"
                self.msg += result[str(log)][str(error)]+"\n"
                self.msg += "***********************\n"
        self.msg += "%s errors found in logs." % errcount
        self.assertEqual(errcount, 0, msg=self.msg)
import subprocess
import unittest
import sys
import time
from oeqa.oetest import oeRuntimeTest

class PingTest(oeRuntimeTest):
    """Require 5 consecutive successful pings to the target within 60 seconds."""

    def test_ping(self):
        collected_output = ''
        consecutive = 0
        deadline = time.time() + 60
        # A single failed ping resets the streak; stop on 5 in a row or timeout.
        while consecutive < 5 and time.time() < deadline:
            proc = subprocess.Popen("ping -c 1 %s" % self.target.ip, shell=True, stdout=subprocess.PIPE)
            collected_output += proc.communicate()[0]
            consecutive = consecutive + 1 if proc.poll() == 0 else 0
        self.assertEqual(consecutive, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (consecutive, collected_output))
import unittest
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *

def setUpModule():
    if not oeRuntimeTest.hasPackage("python"):
        skipModule("No python package in the image")


class PythonTest(oeRuntimeTest):
    """Run a small python script on the target and check its output and side effects."""

    @classmethod
    def setUpClass(self):
        # Stage the test script in /tmp on the target.
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.py"), "/tmp/test.py")

    def test_python_exists(self):
        (rc, out) = self.target.run('which python')
        self.assertEqual(rc, 0, msg="Python binary not in PATH or not on target.")

    @testcase(965)
    def test_python_stdout(self):
        (rc, out) = self.target.run('python /tmp/test.py')
        self.assertEqual(rc, 0, msg="Exit status was not 0. Output: %s" % out)
        self.assertEqual(out, "the value of a is 0.01", msg="Incorrect output: %s" % out)

    def test_python_testfile(self):
        # The script touches /tmp/testfile.python; confirm it exists.
        (rc, out) = self.target.run('ls /tmp/testfile.python')
        self.assertEqual(rc, 0, msg="Python test file generate failed.")

    @classmethod
    def tearDownClass(self):
        oeRuntimeTest.tc.target.run("rm /tmp/test.py /tmp/testfile.python")
RpmBasicTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_rpm_help(self): + (status, output) = self.target.run('rpm --help') + self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output)) + + @testcase(191) + @skipUnlessPassed('test_rpm_help') + def test_rpm_query(self): + (status, output) = self.target.run('rpm -q rpm') + self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output)) + +class RpmInstallRemoveTest(oeRuntimeTest): + + @classmethod + def setUpClass(self): + pkgarch = oeRuntimeTest.tc.d.getVar('TUNE_PKGARCH', True).replace("-", "_") + rpmdir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), "rpm", pkgarch) + # pick rpm-doc as a test file to get installed, because it's small and it will always be built for standard targets + for f in fnmatch.filter(os.listdir(rpmdir), "rpm-doc-*.%s.rpm" % pkgarch): + testrpmfile = f + oeRuntimeTest.tc.target.copy_to(os.path.join(rpmdir,testrpmfile), "/tmp/rpm-doc.rpm") + + @testcase(192) + @skipUnlessPassed('test_rpm_help') + def test_rpm_install(self): + (status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm') + self.assertEqual(status, 0, msg="Failed to install rpm-doc package: %s" % output) + + @testcase(194) + @skipUnlessPassed('test_rpm_install') + def test_rpm_remove(self): + (status,output) = self.target.run('rpm -e rpm-doc') + self.assertEqual(status, 0, msg="Failed to remove rpm-doc package: %s" % output) + + @classmethod + def tearDownClass(self): + oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm') + diff --git a/meta/lib/oeqa/runtime/scanelf.py b/meta/lib/oeqa/runtime/scanelf.py new file mode 100644 index 0000000000..43a024ab9a --- /dev/null +++ b/meta/lib/oeqa/runtime/scanelf.py @@ -0,0 +1,28 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("pax-utils"): + skipModule("pax-utils package not installed") + +class 
ScanelfTest(oeRuntimeTest): + + def setUp(self): + self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path' + + @testcase(966) + @skipUnlessPassed('test_ssh') + def test_scanelf_textrel(self): + # print TEXTREL information + self.scancmd += " --textrel" + (status, output) = self.target.run(self.scancmd) + self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output])) + + @testcase(967) + @skipUnlessPassed('test_ssh') + def test_scanelf_rpath(self): + # print RPATH information + self.scancmd += " --rpath" + (status, output) = self.target.run(self.scancmd) + self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output])) diff --git a/meta/lib/oeqa/runtime/scp.py b/meta/lib/oeqa/runtime/scp.py new file mode 100644 index 0000000000..48e87d2d0b --- /dev/null +++ b/meta/lib/oeqa/runtime/scp.py @@ -0,0 +1,22 @@ +import os +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import skipUnlessPassed, testcase + +def setUpModule(): + if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh-sshd")): + skipModule("No ssh package in image") + +class ScpTest(oeRuntimeTest): + + @testcase(220) + @skipUnlessPassed('test_ssh') + def test_scp_file(self): + test_log_dir = oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True) + test_file_path = os.path.join(test_log_dir, 'test_scp_file') + with open(test_file_path, 'w') as test_scp_file: + test_scp_file.seek(2 ** 22 - 1) + test_scp_file.write(os.linesep) + (status, output) = self.target.copy_to(test_file_path, '/tmp/test_scp_file') + self.assertEqual(status, 0, msg = "File could not be copied. 
Output: %s" % output) + (status, output) = self.target.run("ls -la /tmp/test_scp_file") + self.assertEqual(status, 0, msg = "SCP test failed") diff --git a/meta/lib/oeqa/runtime/skeletoninit.py b/meta/lib/oeqa/runtime/skeletoninit.py new file mode 100644 index 0000000000..7c7f402e5d --- /dev/null +++ b/meta/lib/oeqa/runtime/skeletoninit.py @@ -0,0 +1,29 @@ +# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284 testcase +# Note that the image under test must have meta-skeleton layer in bblayers and IMAGE_INSTALL_append = " service" in local.conf + +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("service"): + skipModule("No service package in image") + + +class SkeletonBasicTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image") + def test_skeleton_availability(self): + (status, output) = self.target.run('ls /etc/init.d/skeleton') + self.assertEqual(status, 0, msg = "skeleton init script not found. Output:\n%s " % output) + (status, output) = self.target.run('ls /usr/sbin/skeleton-test') + self.assertEqual(status, 0, msg = "skeleton-test not found. 
Output:\n%s" % output) + + @testcase(284) + @skipUnlessPassed('test_skeleton_availability') + @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image") + def test_skeleton_script(self): + output1 = self.target.run("/etc/init.d/skeleton start")[1] + (status, output2) = self.target.run(oeRuntimeTest.pscmd + ' | grep [s]keleton-test') + self.assertEqual(status, 0, msg = "Skeleton script could not be started:\n%s\n%s" % (output1, output2)) diff --git a/meta/lib/oeqa/runtime/smart.py b/meta/lib/oeqa/runtime/smart.py new file mode 100644 index 0000000000..3b49314df7 --- /dev/null +++ b/meta/lib/oeqa/runtime/smart.py @@ -0,0 +1,121 @@ +import unittest +import re +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * +from oeqa.utils.httpserver import HTTPService + +def setUpModule(): + if not oeRuntimeTest.hasFeature("package-management"): + skipModule("Image doesn't have package management feature") + if not oeRuntimeTest.hasPackage("smart"): + skipModule("Image doesn't have smart installed") + if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]: + skipModule("Rpm is not the primary package manager") + +class SmartTest(oeRuntimeTest): + + @skipUnlessPassed('test_smart_help') + def smart(self, command, expected = 0): + command = 'smart %s' % command + status, output = self.target.run(command, 1500) + message = os.linesep.join([command, output]) + self.assertEqual(status, expected, message) + self.assertFalse("Cannot allocate memory" in output, message) + return output + +class SmartBasicTest(SmartTest): + + @testcase(716) + @skipUnlessPassed('test_ssh') + def test_smart_help(self): + self.smart('--help') + + def test_smart_version(self): + self.smart('--version') + + @testcase(721) + def test_smart_info(self): + self.smart('info python-smartpm') + + @testcase(421) + def test_smart_query(self): + self.smart('query python-smartpm') + + 
@testcase(720) + def test_smart_search(self): + self.smart('search python-smartpm') + + @testcase(722) + def test_smart_stats(self): + self.smart('stats') + +class SmartRepoTest(SmartTest): + + @classmethod + def setUpClass(self): + self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip) + self.repo_server.start() + + @classmethod + def tearDownClass(self): + self.repo_server.stop() + + def test_smart_channel(self): + self.smart('channel', 1) + + @testcase(719) + def test_smart_channel_add(self): + image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True) + deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype) + pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split() + for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)): + if arch in pkgarchs: + self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url)) + self.smart('update') + + def test_smart_channel_help(self): + self.smart('channel --help') + + def test_smart_channel_list(self): + self.smart('channel --list') + + def test_smart_channel_show(self): + self.smart('channel --show') + + @testcase(717) + def test_smart_channel_rpmsys(self): + self.smart('channel --show rpmsys') + self.smart('channel --disable rpmsys') + self.smart('channel --enable rpmsys') + + @skipUnlessPassed('test_smart_channel_add') + def test_smart_install(self): + self.smart('remove -y psplash-default') + self.smart('install -y psplash-default') + + @testcase(728) + @skipUnlessPassed('test_smart_install') + def test_smart_install_dependency(self): + self.smart('remove -y psplash') + self.smart('install -y psplash-default') + + @testcase(723) + @skipUnlessPassed('test_smart_channel_add') + def test_smart_install_from_disk(self): + self.smart('remove -y psplash-default') + self.smart('download psplash-default') + self.smart('install -y ./psplash-default*') + + @testcase(725) 
+ @skipUnlessPassed('test_smart_channel_add') + def test_smart_install_from_http(self): + output = self.smart('download --urls psplash-default') + url = re.search('(http://.*/psplash-default.*\.rpm)', output) + self.assertTrue(url, msg="Couln't find download url in %s" % output) + self.smart('remove -y psplash-default') + self.smart('install -y %s' % url.group(0)) + + @testcase(729) + @skipUnlessPassed('test_smart_install') + def test_smart_reinstall(self): + self.smart('reinstall -y psplash-default') diff --git a/meta/lib/oeqa/runtime/ssh.py b/meta/lib/oeqa/runtime/ssh.py new file mode 100644 index 0000000000..0e76d5d512 --- /dev/null +++ b/meta/lib/oeqa/runtime/ssh.py @@ -0,0 +1,19 @@ +import subprocess +import unittest +import sys +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh")): + skipModule("No ssh package in image") + +class SshTest(oeRuntimeTest): + + @testcase(224) + @skipUnlessPassed('test_ping') + def test_ssh(self): + (status, output) = self.target.run('uname -a') + self.assertEqual(status, 0, msg="SSH Test failed: %s" % output) + (status, output) = self.target.run('cat /etc/masterimage') + self.assertEqual(status, 1, msg="This isn't the right image - /etc/masterimage shouldn't be here %s" % output) diff --git a/meta/lib/oeqa/runtime/syslog.py b/meta/lib/oeqa/runtime/syslog.py new file mode 100644 index 0000000000..7fa018e97f --- /dev/null +++ b/meta/lib/oeqa/runtime/syslog.py @@ -0,0 +1,48 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasPackage("syslog"): + skipModule("No syslog package in image") + +class SyslogTest(oeRuntimeTest): + + @skipUnlessPassed("test_ssh") + def test_syslog_help(self): + (status,output) = self.target.run('/sbin/syslogd --help') + self.assertEqual(status, 0, msg="status and 
output: %s and %s" % (status,output)) + + @testcase(201) + @skipUnlessPassed("test_syslog_help") + def test_syslog_running(self): + (status,output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -i [s]yslogd') + self.assertEqual(status, 0, msg="no syslogd process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1]) + + +class SyslogTestConfig(oeRuntimeTest): + + @skipUnlessPassed("test_syslog_running") + def test_syslog_logger(self): + (status,output) = self.target.run('logger foobar && test -e /var/log/messages && grep foobar /var/log/messages || logread | grep foobar') + self.assertEqual(status, 0, msg="Test log string not found in /var/log/messages. Output: %s " % output) + + @skipUnlessPassed("test_syslog_running") + def test_syslog_restart(self): + if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"): + (status,output) = self.target.run('/etc/init.d/syslog restart') + else: + (status,output) = self.target.run('systemctl restart syslog.service') + + @testcase(202) + @skipUnlessPassed("test_syslog_restart") + @skipUnlessPassed("test_syslog_logger") + @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image") + def test_syslog_startup_config(self): + self.target.run('echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf') + (status,output) = self.target.run('/etc/init.d/syslog restart') + self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output)) + (status,output) = self.target.run('logger foobar && grep foobar /var/log/test') + self.assertEqual(status, 0, msg="Test log string not found. 
Output: %s " % output) + self.target.run("sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf") + self.target.run('/etc/init.d/syslog restart') diff --git a/meta/lib/oeqa/runtime/systemd.py b/meta/lib/oeqa/runtime/systemd.py new file mode 100644 index 0000000000..1451698bb3 --- /dev/null +++ b/meta/lib/oeqa/runtime/systemd.py @@ -0,0 +1,88 @@ +import unittest +import re +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("systemd"): + skipModule("target doesn't have systemd in DISTRO_FEATURES") + if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True): + skipModule("systemd is not the init manager for this image") + + +class SystemdTest(oeRuntimeTest): + + def systemctl(self, action = '', target = '', expected = 0, verbose = False): + command = 'systemctl %s %s' % (action, target) + status, output = self.target.run(command) + message = '\n'.join([command, output]) + if status != expected and verbose: + message += self.target.run('systemctl status --full %s' % target)[1] + self.assertEqual(status, expected, message) + return output + + +class SystemdBasicTests(SystemdTest): + + @skipUnlessPassed('test_ssh') + def test_systemd_basic(self): + self.systemctl('--version') + + @testcase(551) + @skipUnlessPassed('test_system_basic') + def test_systemd_list(self): + self.systemctl('list-unit-files') + + def settle(self): + """ + Block until systemd has finished activating any units being activated, + or until two minutes has elapsed. + + Returns a tuple, either (True, '') if all units have finished + activating, or (False, message string) if there are still units + activating (generally, failing units that restart). 
+ """ + import time + endtime = time.time() + (60 * 2) + while True: + status, output = self.target.run('systemctl --state=activating') + if "0 loaded units listed" in output: + return (True, '') + if time.time() >= endtime: + return (False, output) + time.sleep(10) + + @testcase(550) + @skipUnlessPassed('test_systemd_basic') + def test_systemd_failed(self): + settled, output = self.settle() + self.assertTrue(settled, msg="Timed out waiting for systemd to settle:\n" + output) + + output = self.systemctl('list-units', '--failed') + match = re.search("0 loaded units listed", output) + if not match: + output += self.systemctl('status --full --failed') + self.assertTrue(match, msg="Some systemd units failed:\n%s" % output) + + +class SystemdServiceTests(SystemdTest): + + @skipUnlessPassed('test_systemd_basic') + def test_systemd_status(self): + self.systemctl('status --full', 'avahi-daemon.service') + + @testcase(695) + @skipUnlessPassed('test_systemd_status') + def test_systemd_stop_start(self): + self.systemctl('stop', 'avahi-daemon.service') + self.systemctl('is-active', 'avahi-daemon.service', expected=3, verbose=True) + self.systemctl('start','avahi-daemon.service') + self.systemctl('is-active', 'avahi-daemon.service', verbose=True) + + @testcase(696) + @skipUnlessPassed('test_systemd_basic') + def test_systemd_disable_enable(self): + self.systemctl('disable', 'avahi-daemon.service') + self.systemctl('is-enabled', 'avahi-daemon.service', expected=1) + self.systemctl('enable', 'avahi-daemon.service') + self.systemctl('is-enabled', 'avahi-daemon.service') diff --git a/meta/lib/oeqa/runtime/vnc.py b/meta/lib/oeqa/runtime/vnc.py new file mode 100644 index 0000000000..f31deff306 --- /dev/null +++ b/meta/lib/oeqa/runtime/vnc.py @@ -0,0 +1,20 @@ +from oeqa.oetest import oeRuntimeTest, skipModuleUnless +from oeqa.utils.decorators import * +import re + +def setUpModule(): + skipModuleUnless(oeRuntimeTest.hasPackage('x11vnc'), "No x11vnc package in image") + +class 
VNCTest(oeRuntimeTest): + + @testcase(213) + @skipUnlessPassed('test_ssh') + def test_vnc(self): + (status, output) = self.target.run('x11vnc -display :0 -bg -o x11vnc.log') + self.assertEqual(status, 0, msg="x11vnc server failed to start: %s" % output) + port = re.search('PORT=[0-9]*', output) + self.assertTrue(port, msg="Listening port not specified in command output: %s" %output) + + vncport = port.group(0).split('=')[1] + (status, output) = self.target.run('netstat -ntl | grep ":%s"' % vncport) + self.assertEqual(status, 0, msg="x11vnc server not running on port %s\n\n%s" % (vncport, self.target.run('netstat -ntl; cat x11vnc.log')[1])) diff --git a/meta/lib/oeqa/runtime/x32lib.py b/meta/lib/oeqa/runtime/x32lib.py new file mode 100644 index 0000000000..ce5e214035 --- /dev/null +++ b/meta/lib/oeqa/runtime/x32lib.py @@ -0,0 +1,18 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + #check if DEFAULTTUNE is set and it's value is: x86-64-x32 + defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True) + if "x86-64-x32" not in defaulttune: + skipModule("DEFAULTTUNE is not set to x86-64-x32") + +class X32libTest(oeRuntimeTest): + + @testcase(281) + @skipUnlessPassed("test_ssh") + def test_x32_file(self): + status1 = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0] + status2 = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0] + self.assertTrue(status1 == 0 and status2 == 0, msg="/bin/ls isn't an X86-64 ELF32 binary. 
readelf says: %s" % self.target.run("readelf -h /bin/ls")[1]) diff --git a/meta/lib/oeqa/runtime/xorg.py b/meta/lib/oeqa/runtime/xorg.py new file mode 100644 index 0000000000..a07031e5c8 --- /dev/null +++ b/meta/lib/oeqa/runtime/xorg.py @@ -0,0 +1,17 @@ +import unittest +from oeqa.oetest import oeRuntimeTest, skipModule +from oeqa.utils.decorators import * + +def setUpModule(): + if not oeRuntimeTest.hasFeature("x11-base"): + skipModule("target doesn't have x11 in IMAGE_FEATURES") + + +class XorgTest(oeRuntimeTest): + + @skipUnlessPassed('test_ssh') + def test_xorg_running(self): + (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -v xinit | grep [X]org') + self.assertEqual(status, 0, msg="Xorg does not appear to be running %s" % self.target.run(oeRuntimeTest.pscmd)[1]) + + diff --git a/meta/lib/oeqa/sdk/__init__.py b/meta/lib/oeqa/sdk/__init__.py new file mode 100644 index 0000000000..4cf3fa76b6 --- /dev/null +++ b/meta/lib/oeqa/sdk/__init__.py @@ -0,0 +1,3 @@ +# Enable other layers to have tests in the same named directory +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/sdk/buildcvs.py b/meta/lib/oeqa/sdk/buildcvs.py new file mode 100644 index 0000000000..c7146fa4af --- /dev/null +++ b/meta/lib/oeqa/sdk/buildcvs.py @@ -0,0 +1,25 @@ +from oeqa.oetest import oeSDKTest, skipModule +from oeqa.utils.decorators import * +from oeqa.utils.targetbuild import SDKBuildProject + +class BuildCvsTest(oeSDKTest): + + @classmethod + def setUpClass(self): + self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/cvs/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d, + "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2") + self.project.download_archive() + + def test_cvs(self): + self.assertEqual(self.project.run_configure(), 0, + msg="Running configure failed") + + self.assertEqual(self.project.run_make(), 0, + msg="Running make failed") + + self.assertEqual(self.project.run_install(), 0, + 
msg="Running make install failed") + + @classmethod + def tearDownClass(self): + self.project.clean() diff --git a/meta/lib/oeqa/sdk/buildiptables.py b/meta/lib/oeqa/sdk/buildiptables.py new file mode 100644 index 0000000000..062e5316e7 --- /dev/null +++ b/meta/lib/oeqa/sdk/buildiptables.py @@ -0,0 +1,26 @@ +from oeqa.oetest import oeSDKTest +from oeqa.utils.decorators import * +from oeqa.utils.targetbuild import SDKBuildProject + + +class BuildIptablesTest(oeSDKTest): + + @classmethod + def setUpClass(self): + self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/iptables/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d, + "http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2") + self.project.download_archive() + + def test_iptables(self): + self.assertEqual(self.project.run_configure(), 0, + msg="Running configure failed") + + self.assertEqual(self.project.run_make(), 0, + msg="Running make failed") + + self.assertEqual(self.project.run_install(), 0, + msg="Running make install failed") + + @classmethod + def tearDownClass(self): + self.project.clean() diff --git a/meta/lib/oeqa/sdk/buildsudoku.py b/meta/lib/oeqa/sdk/buildsudoku.py new file mode 100644 index 0000000000..dea77c6599 --- /dev/null +++ b/meta/lib/oeqa/sdk/buildsudoku.py @@ -0,0 +1,26 @@ +from oeqa.oetest import oeSDKTest, skipModule +from oeqa.utils.decorators import * +from oeqa.utils.targetbuild import SDKBuildProject + +def setUpModule(): + if not oeSDKTest.hasPackage("gtk\+"): + skipModule("Image doesn't have gtk+ in manifest") + +class SudokuTest(oeSDKTest): + + @classmethod + def setUpClass(self): + self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/sudoku/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d, + "http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2") + self.project.download_archive() + + def test_sudoku(self): + self.assertEqual(self.project.run_configure(), 0, + msg="Running configure failed") + + 
self.assertEqual(self.project.run_make(), 0, + msg="Running make failed") + + @classmethod + def tearDownClass(self): + self.project.clean() diff --git a/meta/lib/oeqa/selftest/__init__.py b/meta/lib/oeqa/selftest/__init__.py new file mode 100644 index 0000000000..3ad9513f40 --- /dev/null +++ b/meta/lib/oeqa/selftest/__init__.py @@ -0,0 +1,2 @@ +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oeqa/selftest/_sstatetests_noauto.py b/meta/lib/oeqa/selftest/_sstatetests_noauto.py new file mode 100644 index 0000000000..fc9ae7efb9 --- /dev/null +++ b/meta/lib/oeqa/selftest/_sstatetests_noauto.py @@ -0,0 +1,95 @@ +import datetime +import unittest +import os +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer +from oeqa.selftest.sstate import SStateBase + + +class RebuildFromSState(SStateBase): + + @classmethod + def setUpClass(self): + self.builddir = os.path.join(os.environ.get('BUILDDIR')) + + def get_dep_targets(self, primary_targets): + found_targets = [] + bitbake("-g " + ' '.join(map(str, primary_targets))) + with open(os.path.join(self.builddir, 'pn-buildlist'), 'r') as pnfile: + found_targets = pnfile.read().splitlines() + return found_targets + + def configure_builddir(self, builddir): + os.mkdir(builddir) + self.track_for_cleanup(builddir) + os.mkdir(os.path.join(builddir, 'conf')) + shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/local.conf'), os.path.join(builddir, 'conf/local.conf')) + config = {} + config['default_sstate_dir'] = "SSTATE_DIR ?= \"${TOPDIR}/sstate-cache\"" + config['null_sstate_mirrors'] = "SSTATE_MIRRORS = \"\"" + config['default_tmp_dir'] = "TMPDIR = \"${TOPDIR}/tmp\"" + for key in config: + ftools.append_file(os.path.join(builddir, 'conf/selftest.inc'), config[key]) + shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/bblayers.conf'), 
os.path.join(builddir, 'conf/bblayers.conf')) + try: + shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/auto.conf'), os.path.join(builddir, 'conf/auto.conf')) + except: + pass + + def hardlink_tree(self, src, dst): + os.mkdir(dst) + self.track_for_cleanup(dst) + for root, dirs, files in os.walk(src): + if root == src: + continue + os.mkdir(os.path.join(dst, root.split(src)[1][1:])) + for sstate_file in files: + os.link(os.path.join(root, sstate_file), os.path.join(dst, root.split(src)[1][1:], sstate_file)) + + def run_test_sstate_rebuild(self, primary_targets, relocate=False, rebuild_dependencies=False): + buildA = os.path.join(self.builddir, 'buildA') + if relocate: + buildB = os.path.join(self.builddir, 'buildB') + else: + buildB = buildA + + if rebuild_dependencies: + rebuild_targets = self.get_dep_targets(primary_targets) + else: + rebuild_targets = primary_targets + + self.configure_builddir(buildA) + runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildA)) + 'bitbake ' + ' '.join(map(str, primary_targets)), shell=True, executable='/bin/bash') + self.hardlink_tree(os.path.join(buildA, 'sstate-cache'), os.path.join(self.builddir, 'sstate-cache-buildA')) + shutil.rmtree(buildA) + + failed_rebuild = [] + failed_cleansstate = [] + for target in rebuild_targets: + self.configure_builddir(buildB) + self.hardlink_tree(os.path.join(self.builddir, 'sstate-cache-buildA'), os.path.join(buildB, 'sstate-cache')) + + result_cleansstate = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake -ccleansstate ' + target, ignore_status=True, shell=True, executable='/bin/bash') + if not result_cleansstate.status == 0: + failed_cleansstate.append(target) + shutil.rmtree(buildB) + continue + + result_build = runCmd((". 
%s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake ' + target, ignore_status=True, shell=True, executable='/bin/bash') + if not result_build.status == 0: + failed_rebuild.append(target) + + shutil.rmtree(buildB) + + self.assertFalse(failed_rebuild, msg="The following recipes have failed to rebuild: %s" % ' '.join(map(str, failed_rebuild))) + self.assertFalse(failed_cleansstate, msg="The following recipes have failed cleansstate(all others have passed both cleansstate and rebuild from sstate tests): %s" % ' '.join(map(str, failed_cleansstate))) + + def test_sstate_relocation(self): + self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=True, rebuild_dependencies=True) + + def test_sstate_rebuild(self): + self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=False, rebuild_dependencies=True) diff --git a/meta/lib/oeqa/selftest/_toaster.py b/meta/lib/oeqa/selftest/_toaster.py new file mode 100644 index 0000000000..1cf28a0144 --- /dev/null +++ b/meta/lib/oeqa/selftest/_toaster.py @@ -0,0 +1,445 @@ +import unittest +import os +import sys +import shlex, subprocess +import urllib, commands, time, getpass, re, json, shlex + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../', 'bitbake/lib/toaster'))) +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toastermain.settings") + +import toastermain.settings +from django.db.models import Q +from orm.models import * +from oeqa.utils.decorators import testcase + +class ToasterSetup(oeSelfTest): + + def recipe_parse(self, file_path, var): + for line in open(file_path,'r'): + if line.find(var) > -1: + val = line.split(" = ")[1].replace("\"", "").strip() + return val + + def fix_file_path(self, file_path): + if ":" in file_path: + file_path=file_path.split(":")[2] + return file_path + +class Toaster_DB_Tests(ToasterSetup): + + # 
Check if build name is unique - tc_id=795 + @testcase(795) + def test_Build_Unique_Name(self): + all_builds = Build.objects.all().count() + distinct_builds = Build.objects.values('id').distinct().count() + self.assertEqual(distinct_builds, all_builds, msg = 'Build name is not unique') + + # Check if build coocker log path is unique - tc_id=819 + @testcase(819) + def test_Build_Unique_Cooker_Log_Path(self): + distinct_path = Build.objects.values('cooker_log_path').distinct().count() + total_builds = Build.objects.values('id').count() + self.assertEqual(distinct_path, total_builds, msg = 'Build coocker log path is not unique') + + # Check if the number of errors matches the number of orm_logmessage.level entries with value 2 - tc_id=820 + @testcase(820) + def test_Build_Errors_No(self): + builds = Build.objects.values('id', 'errors_no') + cnt_err = [] + for build in builds: + log_mess_err_no = LogMessage.objects.filter(build = build['id'], level = 2).count() + if (build['errors_no'] != log_mess_err_no): + cnt_err.append(build['id']) + self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err) + + # Check if the number of warnings matches the number of orm_logmessage.level entries with value 1 - tc=821 + @testcase(821) + def test_Build_Warnings_No(self): + builds = Build.objects.values('id', 'warnings_no') + cnt_err = [] + for build in builds: + log_mess_warn_no = LogMessage.objects.filter(build = build['id'], level = 1).count() + if (build['warnings_no'] != log_mess_warn_no): + cnt_err.append(build['id']) + self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err) + + # Check if the build succeeded then the errors_no is 0 - tc_id=822 + @testcase(822) + def test_Build_Suceeded_Errors_No(self): + builds = Build.objects.filter(outcome = 0).values('id', 'errors_no') + cnt_err = [] + for build in builds: + if (build['errors_no'] != 0): + cnt_err.append(build['id']) + self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % 
cnt_err) + + # Check if task order is unique for one build - tc=824 + @testcase(824) + def test_Task_Unique_Order(self): + builds = Build.objects.values('id') + cnt_err = [] + for build in builds: + total_task_order = Task.objects.filter(build = build['id']).values('order').count() + distinct_task_order = Task.objects.filter(build = build['id']).values('order').distinct().count() + if (total_task_order != distinct_task_order): + cnt_err.append(build['id']) + self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err) + + # Check task order sequence for one build - tc=825 + @testcase(825) + def test_Task_Order_Sequence(self): + builds = builds = Build.objects.values('id') + cnt_err = [] + for build in builds: + tasks = Task.objects.filter(Q(build = build['id']), ~Q(order = None), ~Q(task_name__contains = '_setscene')).values('id', 'order').order_by("order") + cnt_tasks = 0 + for task in tasks: + cnt_tasks += 1 + if (task['order'] != cnt_tasks): + cnt_err.append(task['id']) + self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err) + + # Check if disk_io matches the difference between EndTimeIO and StartTimeIO in build stats - tc=828 + ### this needs to be updated ### + #def test_Task_Disk_IO_TC828(self): + + # Check if outcome = 2 (SSTATE) then sstate_result must be 3 (RESTORED) - tc=832 + @testcase(832) + def test_Task_If_Outcome_2_Sstate_Result_Must_Be_3(self): + tasks = Task.objects.filter(outcome = 2).values('id', 'sstate_result') + cnt_err = [] + for task in tasks: + if (row['sstate_result'] != 3): + cnt_err.append(task['id']) + self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err) + + # Check if outcome = 1 (COVERED) or 3 (EXISTING) then sstate_result must be 0 (SSTATE_NA) - tc=833 + @testcase(833) + def test_Task_If_Outcome_1_3_Sstate_Result_Must_Be_0(self): + tasks = Task.objects.filter(outcome__in = (1, 3)).values('id', 'sstate_result') + cnt_err = [] + for task in tasks: + if (task['sstate_result'] 
    # --- Database integrity checks over the Toaster ORM (Task/Target/Recipe/
    # --- Package/Layer tables). Each test collects offending row ids into
    # --- cnt_err and asserts the list is empty so the failure message lists
    # --- every bad row at once.

    # Check if outcome is 0 (SUCCESS) or 4 (FAILED) then sstate_result must be 0 (NA), 1 (MISS) or 2 (FAILED) - tc=834
    @testcase(834)
    def test_Task_If_Outcome_0_4_Sstate_Result_Must_Be_0_1_2(self):
        tasks = Task.objects.filter(outcome__in = (0, 4)).values('id', 'sstate_result')
        cnt_err = []
        for task in tasks:
            if (task['sstate_result'] not in [0, 1, 2]):
                cnt_err.append(task['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)

    # Check if task_executed = TRUE (1), script_type must be 0 (CODING_NA), 2 (CODING_PYTHON), 3 (CODING_SHELL) - tc=891
    @testcase(891)
    def test_Task_If_Task_Executed_True_Script_Type_0_2_3(self):
        tasks = Task.objects.filter(task_executed = 1).values('id', 'script_type')
        cnt_err = []
        for task in tasks:
            if (task['script_type'] not in [0, 2, 3]):
                cnt_err.append(task['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)

    # Check if task_executed = TRUE (1), outcome must be 0 (SUCCESS) or 4 (FAILED) - tc=836
    @testcase(836)
    def test_Task_If_Task_Executed_True_Outcome_0_4(self):
        tasks = Task.objects.filter(task_executed = 1).values('id', 'outcome')
        cnt_err = []
        for task in tasks:
            if (task['outcome'] not in [0, 4]):
                cnt_err.append(task['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)

    # Check if task_executed = FALSE (0), script_type must be 0 - tc=890
    @testcase(890)
    def test_Task_If_Task_Executed_False_Script_Type_0(self):
        tasks = Task.objects.filter(task_executed = 0).values('id', 'script_type')
        cnt_err = []
        for task in tasks:
            if (task['script_type'] != 0):
                cnt_err.append(task['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)

    # Check if task_executed = FALSE (0) and build outcome = SUCCEEDED (0), task outcome must be 1 (COVERED), 2 (CACHED), 3 (PREBUILT), 5 (EMPTY) - tc=837
    @testcase(837)
    def test_Task_If_Task_Executed_False_Outcome_1_2_3_5(self):
        builds = Build.objects.filter(outcome = 0).values('id')
        cnt_err = []
        for build in builds:
            tasks = Task.objects.filter(build = build['id'], task_executed = 0).values('id', 'outcome')
            for task in tasks:
                if (task['outcome'] not in [1, 2, 3, 5]):
                    cnt_err.append(task['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)

    # Key verification - tc=888
    # Every Target_Installed_Package row must reference an existing Target and Package.
    @testcase(888)
    def test_Target_Installed_Package(self):
        rows = Target_Installed_Package.objects.values('id', 'target_id', 'package_id')
        cnt_err = []
        for row in rows:
            target = Target.objects.filter(id = row['target_id']).values('id')
            package = Package.objects.filter(id = row['package_id']).values('id')
            if (not target or not package):
                cnt_err.append(row['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for target installed package id: %s' % cnt_err)

    # Key verification - tc=889
    # Every Task_Dependency row must reference two existing Task rows.
    @testcase(889)
    def test_Task_Dependency(self):
        rows = Task_Dependency.objects.values('id', 'task_id', 'depends_on_id')
        cnt_err = []
        for row in rows:
            task_id = Task.objects.filter(id = row['task_id']).values('id')
            depends_on_id = Task.objects.filter(id = row['depends_on_id']).values('id')
            if (not task_id or not depends_on_id):
                cnt_err.append(row['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for task dependency id: %s' % cnt_err)

    # Check if build target file_name is populated only if is_image=true AND orm_build.outcome=0 then if the file exists and its size matches the file_size value
    ### Need to add the tc in the test run
    @testcase(1037)
    def test_Target_File_Name_Populated(self):
        builds = Build.objects.filter(outcome = 0).values('id')
        for build in builds:
            targets = Target.objects.filter(build_id = build['id'], is_image = 1).values('id')
            for target in targets:
                target_files = Target_Image_File.objects.filter(target_id = target['id']).values('id', 'file_name', 'file_size')
                # cnt_err is per-target here: each target's image files are
                # asserted separately inside the loop.
                cnt_err = []
                for file_info in target_files:
                    target_id = file_info['id']
                    target_file_name = file_info['file_name']
                    target_file_size = file_info['file_size']
                    if (not target_file_name or not target_file_size):
                        cnt_err.append(target_id)
                    else:
                        if (not os.path.exists(target_file_name)):
                            cnt_err.append(target_id)
                        else:
                            if (os.path.getsize(target_file_name) != target_file_size):
                                cnt_err.append(target_id)
                self.assertEqual(len(cnt_err), 0, msg = 'Errors for target image file id: %s' % cnt_err)

    # Key verification - tc=884
    # A package must never depend on itself.
    @testcase(884)
    def test_Package_Dependency(self):
        cnt_err = []
        deps = Package_Dependency.objects.values('id', 'package_id', 'depends_on_id')
        for dep in deps:
            if (dep['package_id'] == dep['depends_on_id']):
                cnt_err.append(dep['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for package dependency id: %s' % cnt_err)

    # Check if recipe name does not start with a number (0-9) - tc=838
    @testcase(838)
    def test_Recipe_Name(self):
        recipes = Recipe.objects.values('id', 'name')
        cnt_err = []
        for recipe in recipes:
            if (recipe['name'][0].isdigit() is True):
                cnt_err.append(recipe['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)

    # Check if recipe section matches the content of the SECTION variable (if set) in file_path - tc=839
    # fix_file_path / recipe_parse are helpers defined earlier in this class.
    @testcase(839)
    def test_Recipe_DB_Section_Match_Recipe_File_Section(self):
        recipes = Recipe.objects.values('id', 'section', 'file_path')
        cnt_err = []
        for recipe in recipes:
            file_path = self.fix_file_path(recipe['file_path'])
            file_exists = os.path.isfile(file_path)
            if (not file_path or (file_exists is False)):
                cnt_err.append(recipe['id'])
            else:
                file_section = self.recipe_parse(file_path, "SECTION = ")
                db_section = recipe['section']
                if file_section:
                    if (db_section != file_section):
                        cnt_err.append(recipe['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)

    # Check if recipe license matches the content of the LICENSE variable (if set) in file_path - tc=840
    @testcase(840)
    def test_Recipe_DB_License_Match_Recipe_File_License(self):
        recipes = Recipe.objects.values('id', 'license', 'file_path')
        cnt_err = []
        for recipe in recipes:
            file_path = self.fix_file_path(recipe['file_path'])
            file_exists = os.path.isfile(file_path)
            if (not file_path or (file_exists is False)):
                cnt_err.append(recipe['id'])
            else:
                file_license = self.recipe_parse(file_path, "LICENSE = ")
                db_license = recipe['license']
                if file_license:
                    if (db_license != file_license):
                        cnt_err.append(recipe['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)

    # Check if recipe homepage matches the content of the HOMEPAGE variable (if set) in file_path - tc=841
    @testcase(841)
    def test_Recipe_DB_Homepage_Match_Recipe_File_Homepage(self):
        recipes = Recipe.objects.values('id', 'homepage', 'file_path')
        cnt_err = []
        for recipe in recipes:
            file_path = self.fix_file_path(recipe['file_path'])
            file_exists = os.path.isfile(file_path)
            if (not file_path or (file_exists is False)):
                cnt_err.append(recipe['id'])
            else:
                file_homepage = self.recipe_parse(file_path, "HOMEPAGE = ")
                db_homepage = recipe['homepage']
                if file_homepage:
                    if (db_homepage != file_homepage):
                        cnt_err.append(recipe['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)

    # Check if recipe bugtracker matches the content of the BUGTRACKER variable (if set) in file_path - tc=842
    @testcase(842)
    def test_Recipe_DB_Bugtracker_Match_Recipe_File_Bugtracker(self):
        recipes = Recipe.objects.values('id', 'bugtracker', 'file_path')
        cnt_err = []
        for recipe in recipes:
            file_path = self.fix_file_path(recipe['file_path'])
            file_exists = os.path.isfile(file_path)
            if (not file_path or (file_exists is False)):
                cnt_err.append(recipe['id'])
            else:
                file_bugtracker = self.recipe_parse(file_path, "BUGTRACKER = ")
                db_bugtracker = recipe['bugtracker']
                if file_bugtracker:
                    if (db_bugtracker != file_bugtracker):
                        cnt_err.append(recipe['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)

    # Recipe key verification, recipe name does not depends on a recipe having the same name - tc=883
    @testcase(883)
    def test_Recipe_Dependency(self):
        deps = Recipe_Dependency.objects.values('id', 'recipe_id', 'depends_on_id')
        cnt_err = []
        for dep in deps:
            if (not dep['recipe_id'] or not dep['depends_on_id']):
                cnt_err.append(dep['id'])
            else:
                name = Recipe.objects.filter(id = dep['recipe_id']).values('name')
                dep_name = Recipe.objects.filter(id = dep['depends_on_id']).values('name')
                if (name == dep_name):
                    cnt_err.append(dep['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe dependency id: %s' % cnt_err)

    # Check if package name does not start with a number (0-9) - tc=846
    # size == -1 rows (filtered out) presumably mark non-built packages -- TODO confirm.
    @testcase(846)
    def test_Package_Name_For_Number(self):
        packages = Package.objects.filter(~Q(size = -1)).values('id', 'name')
        cnt_err = []
        for package in packages:
            if (package['name'][0].isdigit() is True):
                cnt_err.append(package['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)

    # Check if package version starts with a number (0-9) - tc=847
    @testcase(847)
    def test_Package_Version_Starts_With_Number(self):
        packages = Package.objects.filter(~Q(size = -1)).values('id', 'version')
        cnt_err = []
        for package in packages:
            if (package['version'][0].isdigit() is False):
                cnt_err.append(package['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)

    # Check if package revision starts with 'r' - tc=848
    @testcase(848)
    def test_Package_Revision_Starts_With_r(self):
        packages = Package.objects.filter(~Q(size = -1)).values('id', 'revision')
        cnt_err = []
        for package in packages:
            # NOTE(review): indexing [0] before startswith() is redundant and
            # raises IndexError on an empty revision; revision.startswith("r")
            # alone would be safer.
            if (package['revision'][0].startswith("r") is False):
                cnt_err.append(package['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)

    # Check the validity of the package build_id
    ### TC must be added in test run
    @testcase(1038)
    def test_Package_Build_Id(self):
        packages = Package.objects.filter(~Q(size = -1)).values('id', 'build_id')
        cnt_err = []
        for package in packages:
            build_id = Build.objects.filter(id = package['build_id']).values('id')
            if (not build_id):
                cnt_err.append(package['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)

    # Check the validity of package recipe_id
    ### TC must be added in test run
    @testcase(1039)
    def test_Package_Recipe_Id(self):
        packages = Package.objects.filter(~Q(size = -1)).values('id', 'recipe_id')
        cnt_err = []
        for package in packages:
            recipe_id = Recipe.objects.filter(id = package['recipe_id']).values('id')
            if (not recipe_id):
                cnt_err.append(package['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)

    # Check if package installed_size field is not null
    ### TC must be added in test run
    @testcase(1040)
    def test_Package_Installed_Size_Not_NULL(self):
        packages = Package.objects.filter(installed_size__isnull = True).values('id')
        cnt_err = []
        for package in packages:
            cnt_err.append(package['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)

    # Check if all layers requests return exit code is 200 - tc=843
    @testcase(843)
    def test_Layers_Requests_Exit_Code(self):
        layers = Layer.objects.values('id', 'layer_index_url')
        cnt_err = []
        for layer in layers:
            # NOTE(review): urllib.urlopen is the Python 2 API (this file is
            # py2-era); on Python 3 this would be urllib.request.urlopen.
            resp = urllib.urlopen(layer['layer_index_url'])
            if (resp.getcode() != 200):
                cnt_err.append(layer['id'])
        self.assertEqual(len(cnt_err), 0, msg = 'Errors for layer id: %s' % cnt_err)
@testcase(895) + def test_Layers_Show_Layers(self): + layers = Layer.objects.values('id', 'name', 'local_path') + cmd = commands.getoutput('bitbake-layers show_layers') + cnt_err = [] + for layer in layers: + if (layer['name'] or layer['local_path']) not in cmd: + cnt_err.append(layer['id']) + self.assertEqual(len(cnt_err), 0, msg = 'Errors for layer id: %s' % cnt_err) + + # Check if django server starts regardless of the timezone set on the machine - tc=905 + @testcase(905) + def test_Start_Django_Timezone(self): + current_path = os.getcwd() + zonefilelist = [] + ZONEINFOPATH = '/usr/share/zoneinfo/' + os.chdir("../bitbake/lib/toaster/") + cnt_err = 0 + for filename in os.listdir(ZONEINFOPATH): + if os.path.isfile(os.path.join(ZONEINFOPATH, filename)): + zonefilelist.append(filename) + for k in range(len(zonefilelist)): + if k <= 5: + files = zonefilelist[k] + os.system("export TZ="+str(files)+"; python manage.py runserver > /dev/null 2>&1 &") + time.sleep(3) + pid = subprocess.check_output("ps aux | grep '[/u]sr/bin/python manage.py runserver' | awk '{print $2}'", shell = True) + if pid: + os.system("kill -9 "+str(pid)) + else: + cnt_err.append(zonefilelist[k]) + self.assertEqual(cnt_err, 0, msg = 'Errors django server does not start with timezone: %s' % cnt_err) + os.chdir(current_path) diff --git a/meta/lib/oeqa/selftest/base.py b/meta/lib/oeqa/selftest/base.py new file mode 100644 index 0000000000..80b9b4b312 --- /dev/null +++ b/meta/lib/oeqa/selftest/base.py @@ -0,0 +1,131 @@ +# Copyright (c) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + + +# DESCRIPTION +# Base class inherited by test classes in meta/lib/selftest + +import unittest +import os +import sys +import shutil +import logging +import errno + +import oeqa.utils.ftools as ftools +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer +from oeqa.utils.decorators import LogResults + +@LogResults +class oeSelfTest(unittest.TestCase): + + log = 
    # Class-level logger and unittest verbosity flag. `testlayer_path` is read
    # off the class in __init__ and is presumably set on the class by the
    # selftest runner before any test executes -- TODO confirm against caller.
    log = logging.getLogger("selftest.base")
    longMessage = True

    def __init__(self, methodName="runTest"):
        # BUILDDIR must be exported in the environment (oe-init-build-env).
        self.builddir = os.environ.get("BUILDDIR")
        self.localconf_path = os.path.join(self.builddir, "conf/local.conf")
        self.testinc_path = os.path.join(self.builddir, "conf/selftest.inc")
        self.testlayer_path = oeSelfTest.testlayer_path
        self._extra_tear_down_commands = []
        self._track_for_cleanup = []
        super(oeSelfTest, self).__init__(methodName)

    def setUp(self):
        os.chdir(self.builddir)
        # we don't know what the previous test left around in config or inc files
        # if it failed so we need a fresh start
        try:
            os.remove(self.testinc_path)
        except OSError as e:
            # Only a missing file is acceptable; re-raise anything else.
            if e.errno != errno.ENOENT:
                raise
        for root, _, files in os.walk(self.testlayer_path):
            for f in files:
                if f == 'test_recipe.inc':
                    os.remove(os.path.join(root, f))
        # tests might need their own setup
        # but if they overwrite this one they have to call
        # super each time, so let's give them an alternative
        self.setUpLocal()

    def setUpLocal(self):
        # Hook for subclasses: per-test setup without overriding setUp().
        pass

    def tearDown(self):
        # Run any commands registered via add_command_to_tearDown(); failures
        # are logged but do not abort the teardown.
        if self._extra_tear_down_commands:
            failed_extra_commands = []
            for command in self._extra_tear_down_commands:
                result = runCmd(command, ignore_status=True)
                if not result.status == 0:
                    failed_extra_commands.append(command)
            if failed_extra_commands:
                self.log.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
                self.log.debug("Trying to move on.")
            self._extra_tear_down_commands = []

        # Remove files/directories registered via track_for_cleanup().
        if self._track_for_cleanup:
            for path in self._track_for_cleanup:
                if os.path.isdir(path):
                    shutil.rmtree(path)
                if os.path.isfile(path):
                    os.remove(path)
            self._track_for_cleanup = []

        self.tearDownLocal()

    def tearDownLocal(self):
        # Hook for subclasses: per-test teardown without overriding tearDown().
        pass

    # add test specific commands to the tearDown method.
    def add_command_to_tearDown(self, command):
        self.log.debug("Adding command '%s' to tearDown for this test." % command)
        self._extra_tear_down_commands.append(command)

    # add test specific files or directories to be removed in the tearDown method
    def track_for_cleanup(self, path):
        self.log.debug("Adding path '%s' to be cleaned up when test is over" % path)
        self._track_for_cleanup.append(path)

    # write to <builddir>/conf/selftest.inc
    def write_config(self, data):
        self.log.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
        ftools.write_file(self.testinc_path, data)

    # append to <builddir>/conf/selftest.inc
    def append_config(self, data):
        self.log.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
        ftools.append_file(self.testinc_path, data)

    # remove data from <builddir>/conf/selftest.inc
    def remove_config(self, data):
        # NOTE(review): the "\%s" below logs a literal backslash before the
        # data; most likely a typo for plain "%s".
        self.log.debug("Removing from: %s\n\%s\n" % (self.testinc_path, data))
        ftools.remove_from_file(self.testinc_path, data)

    # write to meta-selftest/recipes-test/<recipe>/test_recipe.inc
    def write_recipeinc(self, recipe, data):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Writing to: %s\n%s\n" % (inc_file, data))
        ftools.write_file(inc_file, data)

    # append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc
    def append_recipeinc(self, recipe, data):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Appending to: %s\n%s\n" % (inc_file, data))
        ftools.append_file(inc_file, data)

    # remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc
    def remove_recipeinc(self, recipe, data):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Removing from: %s\n%s\n" % (inc_file, data))
        ftools.remove_from_file(inc_file, data)

    # delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file
    def delete_recipeinc(self, recipe):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Deleting file: %s" % inc_file)
        try:
            os.remove(inc_file)
        except OSError as e:
            # Missing file is fine (nothing to delete); re-raise other errors.
            if e.errno != errno.ENOENT:
                raise
class BitbakeLayers(oeSelfTest):
    """Tests for the bitbake-layers command-line tool (show_layers,
    show_appends, show_overlayed, show-cross-depends and flatten)."""

    @testcase(756)
    def test_bitbakelayers_showcrossdepends(self):
        result = runCmd('bitbake-layers show-cross-depends')
        self.assertTrue('aspell' in result.output, msg='aspell was not listed in the show-cross-depends output')

    @testcase(83)
    def test_bitbakelayers_showlayers(self):
        result = runCmd('bitbake-layers show_layers')
        self.assertTrue('meta-selftest' in result.output, msg='meta-selftest layer was not listed in the show_layers output')

    @testcase(93)
    def test_bitbakelayers_showappends(self):
        result = runCmd('bitbake-layers show_appends')
        self.assertTrue('xcursor-transparent-theme_0.1.1.bbappend' in result.output, msg='xcursor-transparent-theme_0.1.1.bbappend file was not recognised')

    @testcase(90)
    def test_bitbakelayers_showoverlayed(self):
        result = runCmd('bitbake-layers show_overlayed')
        # Fixed failure message: it was copy-pasted from the show_appends test
        # and referred to the bbappend instead of the overlayed aspell recipe.
        self.assertTrue('aspell' in result.output, msg='aspell overlayed recipe was not recognised')

    @testcase(95)
    def test_bitbakelayers_flatten(self):
        """Flatten the layers into builddir/test and verify the bbappend
        content was folded into the flattened recipe."""
        self.assertFalse(os.path.isdir(os.path.join(self.builddir, 'test')), msg='test directory already exists in the build directory')
        result = runCmd('bitbake-layers flatten test')
        bb_file = os.path.join(self.builddir, 'test/recipes-graphics/xcursor-transparent-theme/xcursor-transparent-theme_0.1.1.bb')
        self.assertTrue(os.path.isfile(bb_file), msg='Cannot find xcursor-transparent-theme_0.1.1.bb in the flattened layer')
        contents = ftools.read_file(bb_file)
        find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
        # Remove the flattened tree before asserting so a failure does not
        # leave stale state behind for the next test (preserves original order).
        shutil.rmtree(os.path.join(self.builddir, 'test'))
        self.assertTrue(find_in_contents, msg='Flattened recipe does not contain the bbappended content')
class BitbakeTests(oeSelfTest):
    """Selftests for core bitbake behaviour: invocation from different
    directories, event handlers, sstate, error reporting and the common
    command-line options (-e, -g, -n, -p, -s, -r, -R, -k, -c)."""

    @testcase(789)
    def test_run_bitbake_from_dir_1(self):
        # bitbake must work when started from a subdirectory of the build dir
        os.chdir(os.path.join(self.builddir, 'conf'))
        bitbake('-e')

    @testcase(790)
    def test_run_bitbake_from_dir_2(self):
        # bitbake must work from outside the build dir when BBPATH is set
        my_env = os.environ.copy()
        my_env['BBPATH'] = my_env['BUILDDIR']
        os.chdir(os.path.dirname(os.environ['BUILDDIR']))
        bitbake('-e', env=my_env)

    @testcase(806)
    def test_event_handler(self):
        """The test_events class must see BuildStarted/BuildCompleted and
        must never receive an InvalidEvent."""
        self.write_config("INHERIT += \"test_events\"")
        result = bitbake('m4-native')
        find_build_started = re.search("NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Preparing runqueue", result.output)
        find_build_completed = re.search("Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
        self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output)
        self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output)
        self.assertFalse('Test for bb.event.InvalidEvent' in result.output)

    @testcase(103)
    def test_local_sstate(self):
        # After clean, a rebuild should restore from local sstate (_setscene tasks)
        bitbake('m4-native -ccleansstate')
        bitbake('m4-native')
        bitbake('m4-native -cclean')
        result = bitbake('m4-native')
        find_setscene = re.search("m4-native.*do_.*_setscene", result.output)
        self.assertTrue(find_setscene)

    @testcase(105)
    def test_bitbake_invalid_recipe(self):
        result = bitbake('-b asdf', ignore_status=True)
        self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output)

    @testcase(107)
    def test_bitbake_invalid_target(self):
        result = bitbake('asdf', ignore_status=True)
        self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output)

    @testcase(106)
    def test_warnings_errors(self):
        # A failed build must report WARNING/ERROR counts in its summary
        result = bitbake('-b asdf', ignore_status=True)
        find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
        find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages* shown", result.output)
        # Fixed typo in both messages: "mumber" -> "number"
        self.assertTrue(find_warnings, msg="Did not find the number of warnings at the end of the build:\n" + result.output)
        self.assertTrue(find_errors, msg="Did not find the number of errors at the end of the build:\n" + result.output)

    @testcase(108)
    def test_invalid_patch(self):
        # A patch that does not apply must fail do_patch
        self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"')
        result = bitbake('man -c patch', ignore_status=True)
        self.delete_recipeinc('man')
        bitbake('-cclean man')
        self.assertTrue("ERROR: Function failed: patch_do_patch" in result.output)

    @testcase(163)
    def test_force_task(self):
        # -C must invalidate the task and re-run it plus its dependents
        bitbake('m4-native')
        result = bitbake('-C compile m4-native')
        look_for_tasks = ['do_compile', 'do_install', 'do_populate_sysroot']
        for task in look_for_tasks:
            find_task = re.search("m4-native.*%s" % task, result.output)
            self.assertTrue(find_task)

    @testcase(167)
    def test_bitbake_g(self):
        result = bitbake('-g core-image-full-cmdline')
        self.assertTrue('NOTE: PN build list saved to \'pn-buildlist\'' in result.output)
        self.assertTrue('openssh' in ftools.read_file(os.path.join(self.builddir, 'pn-buildlist')))
        # -g drops its graph files in the cwd; clean them up
        for f in ['pn-buildlist', 'pn-depends.dot', 'package-depends.dot', 'task-depends.dot']:
            os.remove(f)

    @testcase(899)
    def test_image_manifest(self):
        bitbake('core-image-minimal')
        deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
        imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
        manifest = os.path.join(deploydir, imagename + ".manifest")
        self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image")

    @testcase(168)
    def test_invalid_recipe_src_uri(self):
        data = 'SRC_URI = "file://invalid"'
        self.write_recipeinc('man', data)
        bitbake('-ccleanall man')
        result = bitbake('-c fetch man', ignore_status=True)
        bitbake('-ccleanall man')
        self.delete_recipeinc('man')
        # Fixed typo in message: "succeded" -> "succeeded"
        self.assertEqual(result.status, 1, msg='Command succeeded when it should have failed')
        self.assertTrue('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output)
        self.assertTrue('ERROR: Function failed: Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.' in result.output)

    @testcase(171)
    def test_rename_downloaded_file(self):
        # downloadfilename= must control the name of the fetched file in DL_DIR
        data = 'SRC_URI_append = ";downloadfilename=test-aspell.tar.gz"'
        self.write_recipeinc('aspell', data)
        bitbake('-ccleanall aspell')
        result = bitbake('-c fetch aspell', ignore_status=True)
        self.delete_recipeinc('aspell')
        self.assertEqual(result.status, 0)
        self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz')))
        self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz.done')))
        bitbake('-ccleanall aspell')

    @testcase(1028)
    def test_environment(self):
        # Variables set in the test config must show up in 'bitbake -e'
        self.append_config("TEST_ENV=\"localconf\"")
        result = runCmd('bitbake -e | grep TEST_ENV=')
        self.assertTrue('localconf' in result.output)
        self.remove_config("TEST_ENV=\"localconf\"")

    @testcase(1029)
    def test_dry_run(self):
        result = runCmd('bitbake -n m4-native')
        self.assertEqual(0, result.status)

    @testcase(1030)
    def test_just_parse(self):
        result = runCmd('bitbake -p')
        self.assertEqual(0, result.status)

    @testcase(1031)
    def test_version(self):
        result = runCmd('bitbake -s | grep wget')
        find = re.search("wget *:([0-9a-zA-Z\.\-]+)", result.output)
        self.assertTrue(find)

    @testcase(1032)
    def test_prefile(self):
        # A -r prefile provides values, which local config may then override
        preconf = os.path.join(self.builddir, 'conf/prefile.conf')
        self.track_for_cleanup(preconf)
        ftools.write_file(preconf ,"TEST_PREFILE=\"prefile\"")
        result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
        self.assertTrue('prefile' in result.output)
        self.append_config("TEST_PREFILE=\"localconf\"")
        result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
        self.assertTrue('localconf' in result.output)
        self.remove_config("TEST_PREFILE=\"localconf\"")

    @testcase(1033)
    def test_postfile(self):
        # A -R postfile must override values from local config
        postconf = os.path.join(self.builddir, 'conf/postfile.conf')
        self.track_for_cleanup(postconf)
        ftools.write_file(postconf , "TEST_POSTFILE=\"postfile\"")
        self.append_config("TEST_POSTFILE=\"localconf\"")
        result = runCmd('bitbake -R conf/postfile.conf -e | grep TEST_POSTFILE=')
        self.assertTrue('postfile' in result.output)
        self.remove_config("TEST_POSTFILE=\"localconf\"")

    @testcase(1034)
    def test_checkuri(self):
        result = runCmd('bitbake -c checkuri m4')
        self.assertEqual(0, result.status)

    @testcase(1035)
    def test_continue(self):
        """With -k a failing task must not stop unrelated recipes: the
        xcursor unpack must still start after man's injected failure."""
        self.write_recipeinc('man',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
        runCmd('bitbake -c cleanall man xcursor-transparent-theme')
        result = runCmd('bitbake man xcursor-transparent-theme -k', ignore_status=True)
        errorpos = result.output.find('ERROR: Function failed: do_fail_task')
        manver = re.search("NOTE: recipe xcursor-transparent-theme-(.*?): task do_unpack: Started", result.output)
        continuepos = result.output.find('NOTE: recipe xcursor-transparent-theme-%s: task do_unpack: Started' % manver.group(1))
        self.assertLess(errorpos,continuepos)
class BuildhistoryBase(oeSelfTest):
    """Shared helpers for buildhistory selftests: enabling the buildhistory
    class in the test configuration and running a build with it."""

    def config_buildhistory(self, tmp_bh_location=False):
        """Make sure the buildhistory class is inherited, optionally pointing
        BUILDHISTORY_DIR at a throwaway directory that is cleaned up after
        the test."""
        # Idiom fix: use "x not in y" instead of "not x in y".
        if ('buildhistory' not in get_bb_var('USER_CLASSES')) and ('buildhistory' not in get_bb_var('INHERIT')):
            add_buildhistory_config = 'INHERIT += "buildhistory"\nBUILDHISTORY_COMMIT = "1"'
            self.append_config(add_buildhistory_config)

        if tmp_bh_location:
            # Using a temporary buildhistory location for testing
            tmp_bh_dir = os.path.join(self.builddir, "tmp_buildhistory_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
            buildhistory_dir_config = "BUILDHISTORY_DIR = \"%s\"" % tmp_bh_dir
            self.append_config(buildhistory_dir_config)
            self.track_for_cleanup(tmp_bh_dir)

    def run_buildhistory_operation(self, target, global_config='', target_config='', change_bh_location=False, expect_error=False, error_regex=''):
        """Build `target` with buildhistory enabled, applying the given
        global/target config snippets, and assert on the build outcome.

        When expect_error is set, the build must fail and its output must
        match error_regex; otherwise the build must succeed."""
        # bool() replaces the original four-line if/else assignment.
        self.config_buildhistory(tmp_bh_location=bool(change_bh_location))

        self.append_config(global_config)
        self.append_recipeinc(target, target_config)
        bitbake("-cclean %s" % target)
        result = bitbake(target, ignore_status=True)
        # Undo the config changes regardless of the build result.
        self.remove_config(global_config)
        self.remove_recipeinc(target, target_config)

        if expect_error:
            self.assertEqual(result.status, 1, msg="Error expected for global config '%s' and target config '%s'" % (global_config, target_config))
            search_for_error = re.search(error_regex, result.output)
            self.assertTrue(search_for_error, msg="Could not find desired error in output: %s" % error_regex)
        else:
            self.assertEqual(result.status, 0, msg="Command 'bitbake %s' has failed unexpectedly: %s" % (target, result.output))
class ImageOptionsTests(oeSelfTest):
    """Tests for image-build configuration options (incremental image
    generation, RM_OLD_IMAGE, ccache)."""

    @testcase(761)
    def test_incremental_image_generation(self):
        # With INC_RPM_IMAGE_GEN enabled, adding then removing an IMAGE_FEATURE
        # must install and subsequently remove the corresponding package.
        bitbake("-c cleanall core-image-minimal")
        self.write_config('INC_RPM_IMAGE_GEN = "1"')
        self.append_config('IMAGE_FEATURES += "ssh-server-openssh"')
        bitbake("core-image-minimal")
        res = runCmd("grep 'Installing openssh-sshd' %s" % (os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")), ignore_status=True)
        self.remove_config('IMAGE_FEATURES += "ssh-server-openssh"')
        self.assertEqual(0, res.status, msg="No match for openssh-sshd in log.do_rootfs")
        bitbake("core-image-minimal")
        res = runCmd("grep 'Removing openssh-sshd' %s" %(os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")),ignore_status=True)
        self.assertEqual(0, res.status, msg="openssh-sshd was not removed from image")

    @testcase(925)
    def test_rm_old_image(self):
        # With RM_OLD_IMAGE, rebuilding the rootfs must delete the images the
        # previous build's symlinks pointed at.
        bitbake("core-image-minimal")
        deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
        imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
        deploydir_files = os.listdir(deploydir)
        track_original_files = []
        for image_file in deploydir_files:
            if imagename in image_file and os.path.islink(os.path.join(deploydir, image_file)):
                track_original_files.append(os.path.realpath(os.path.join(deploydir, image_file)))
        self.append_config("RM_OLD_IMAGE = \"1\"")
        bitbake("-C rootfs core-image-minimal")
        deploydir_files = os.listdir(deploydir)
        remaining_not_expected = [path for path in track_original_files if os.path.basename(path) in deploydir_files]
        # NOTE(review): "ware" in the message below is a typo for "were".
        self.assertFalse(remaining_not_expected, msg="\nThe following image files ware not removed: %s" % ', '.join(map(str, remaining_not_expected)))

    @testcase(286)
    def test_ccache_tool(self):
        # INHERIT ccache and verify do_compile actually invoked ccache.
        bitbake("ccache-native")
        self.assertTrue(os.path.isfile(os.path.join(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'), "ccache")))
        self.write_config('INHERIT += "ccache"')
        bitbake("m4 -c cleansstate")
        bitbake("m4 -c compile")
        res = runCmd("grep ccache %s" % (os.path.join(get_bb_var("WORKDIR","m4"),"temp/log.do_compile")), ignore_status=True)
        self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile")
        bitbake("ccache-native -ccleansstate")


class DiskMonTest(oeSelfTest):
    """Tests for the BB_DISKMON_DIRS disk-space monitor actions."""

    @testcase(277)
    def test_stoptask_behavior(self):
        # The thresholds (100000G free) are deliberately impossible so the
        # monitor always triggers; each action is checked in turn.
        self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"')
        res = bitbake("m4", ignore_status = True)
        self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output)
        self.assertEqual(res.status, 1)
        self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"')
        res = bitbake("m4", ignore_status = True)
        self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output)
        self.assertEqual(res.status, 1)
        self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
        res = bitbake("m4")
        self.assertTrue('WARNING: The free space' in res.output)


class SanityOptionsTest(oeSelfTest):
    """Tests for QA check configuration (ERROR_QA / WARN_QA switching)."""

    @testcase(927)
    def test_options_warnqa_errorqa_switch(self):
        # The same QA issue must be fatal under ERROR_QA and non-fatal
        # (warning only) once moved to WARN_QA.
        bitbake("xcursor-transparent-theme -ccleansstate")

        if "packages-list" not in get_bb_var("ERROR_QA"):
            self.write_config("ERROR_QA_append = \" packages-list\"")

        self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
        res = bitbake("xcursor-transparent-theme", ignore_status=True)
        self.delete_recipeinc('xcursor-transparent-theme')
        self.assertTrue("ERROR: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in res.output, msg=res.output)
        self.assertEqual(res.status, 1)
        self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
        self.append_config('ERROR_QA_remove = "packages-list"')
        self.append_config('WARN_QA_append = " packages-list"')
        bitbake("xcursor-transparent-theme -ccleansstate")
        res = bitbake("xcursor-transparent-theme")
        self.delete_recipeinc('xcursor-transparent-theme')
        self.assertTrue("WARNING: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in res.output, msg=res.output)

    @testcase(278)
    def test_sanity_userspace_dependency(self):
        self.append_config('WARN_QA_append = " unsafe-references-in-binaries unsafe-references-in-scripts"')
        bitbake("-ccleansstate gzip nfs-utils")
        res = bitbake("gzip nfs-utils")
        self.assertTrue("WARNING: QA Issue: gzip" in res.output)
        self.assertTrue("WARNING: QA Issue: nfs-utils" in res.output)


class BuildhistoryTests(BuildhistoryBase):
    """End-to-end buildhistory checks built on BuildhistoryBase."""

    @testcase(293)
    def test_buildhistory_basic(self):
        self.run_buildhistory_operation('xcursor-transparent-theme')
        self.assertTrue(os.path.isdir(get_bb_var('BUILDHISTORY_DIR')))

    @testcase(294)
    def test_buildhistory_buildtime_pr_backwards(self):
        # Building r1 then r0 must trigger the version-went-backwards QA error.
        self.add_command_to_tearDown('cleanup-workdir')
        target = 'xcursor-transparent-theme'
        error = "ERROR: QA Issue: Package version for package %s went backwards which would break package feeds from (.*-r1 to .*-r0)" % target
        self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
        self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=error)
class TestScripts(oeSelfTest):
    """Tests for helper scripts shipped with the build system."""

    @testcase(300)
    def test_cleanup_workdir(self):
        """Build two versions of gzip, run cleanup-workdir, and check it
        removed exactly the stale workdir content and nothing else."""
        path = os.path.dirname(get_bb_var('WORKDIR', 'gzip'))
        old_version_recipe = os.path.join(get_bb_var('COREBASE'), 'meta/recipes-extended/gzip/gzip_1.3.12.bb')
        # NOTE(review): old_version is never used below.
        old_version = '1.3.12'
        bitbake("-ccleansstate gzip")
        bitbake("-ccleansstate -b %s" % old_version_recipe)
        # Start from a clean slate: drop any pre-existing workdirs.
        if os.path.exists(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)):
            shutil.rmtree(get_bb_var('WORKDIR', "-b %s" % old_version_recipe))
        if os.path.exists(get_bb_var('WORKDIR', 'gzip')):
            shutil.rmtree(get_bb_var('WORKDIR', 'gzip'))

        if os.path.exists(path):
            initial_contents = os.listdir(path)
        else:
            initial_contents = []

        bitbake('gzip')
        intermediary_contents = os.listdir(path)
        bitbake("-b %s" % old_version_recipe)
        runCmd('cleanup-workdir')
        remaining_contents = os.listdir(path)

        # expected_contents: what the current gzip build added and should survive.
        expected_contents = [x for x in intermediary_contents if x not in initial_contents]
        remaining_not_expected = [x for x in remaining_contents if x not in expected_contents]
        self.assertFalse(remaining_not_expected, msg="Not all necessary content has been deleted from %s: %s" % (path, ', '.join(map(str, remaining_not_expected))))
        expected_not_remaining = [x for x in expected_contents if x not in remaining_contents]
        self.assertFalse(expected_not_remaining, msg="The script removed extra contents from %s: %s" % (path, ', '.join(map(str, expected_not_remaining))))
change_bh_location=True) + self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True) + result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR')) + expected_output = 'PR changed from "r1" to "r0"' + self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output) diff --git a/meta/lib/oeqa/selftest/prservice.py b/meta/lib/oeqa/selftest/prservice.py new file mode 100644 index 0000000000..fb6d68d3bf --- /dev/null +++ b/meta/lib/oeqa/selftest/prservice.py @@ -0,0 +1,121 @@ +import unittest +import os +import logging +import re +import shutil +import datetime + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var +from oeqa.utils.decorators import testcase + +class BitbakePrTests(oeSelfTest): + + def get_pr_version(self, package_name): + pkgdata_dir = get_bb_var('PKGDATA_DIR') + package_data_file = os.path.join(pkgdata_dir, 'runtime', package_name) + package_data = ftools.read_file(package_data_file) + find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", package_data) + self.assertTrue(find_pr) + return int(find_pr.group(1)) + + def get_task_stamp(self, package_name, recipe_task): + stampdata = get_bb_var('STAMP', target=package_name).split('/') + prefix = stampdata[-1] + package_stamps_path = "/".join(stampdata[:-1]) + stamps = [] + for stamp in os.listdir(package_stamps_path): + find_stamp = re.match("%s\.%s\.([a-z0-9]{32})" % (prefix, recipe_task), stamp) + if find_stamp: + stamps.append(find_stamp.group(1)) + self.assertFalse(len(stamps) == 0, msg="Cound not find stamp for task %s for recipe %s" % (recipe_task, package_name)) + self.assertFalse(len(stamps) > 1, msg="Found multiple %s stamps for the %s recipe in the %s directory." 
% (recipe_task, package_name, package_stamps_path)) + return str(stamps[0]) + + def increment_package_pr(self, package_name): + inc_data = "do_package_append() {\nbb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now() + self.write_recipeinc(package_name, inc_data) + bitbake("-ccleansstate %s" % package_name) + res = bitbake(package_name, ignore_status=True) + self.delete_recipeinc(package_name) + self.assertEqual(res.status, 0, msg=res.output) + self.assertTrue("NOTE: Started PRServer with DBfile" in res.output, msg=res.output) + + def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'): + config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type + self.write_config(config_package_data) + config_server_data = 'PRSERV_HOST = "%s"' % pr_socket + self.append_config(config_server_data) + + def run_test_pr_service(self, package_name, package_type='rpm', track_task='do_package', pr_socket='localhost:0'): + self.config_pr_tests(package_name, package_type, pr_socket) + + self.increment_package_pr(package_name) + pr_1 = self.get_pr_version(package_name) + stamp_1 = self.get_task_stamp(package_name, track_task) + + self.increment_package_pr(package_name) + pr_2 = self.get_pr_version(package_name) + stamp_2 = self.get_task_stamp(package_name, track_task) + + bitbake("-ccleansstate %s" % package_name) + self.assertTrue(pr_2 - pr_1 == 1) + self.assertTrue(stamp_1 != stamp_2) + + def run_test_pr_export_import(self, package_name, replace_current_db=True): + self.config_pr_tests(package_name) + + self.increment_package_pr(package_name) + pr_1 = self.get_pr_version(package_name) + + exported_db_path = os.path.join(self.builddir, 'export.inc') + export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True) + self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output) + + if 
replace_current_db: + current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3') + self.assertTrue(os.path.exists(current_db_path), msg="Path to current PR Service database is invalid: %s" % current_db_path) + os.remove(current_db_path) + + import_result = runCmd("bitbake-prserv-tool import %s" % exported_db_path, ignore_status=True) + os.remove(exported_db_path) + self.assertEqual(import_result.status, 0, msg="PR Service database import failed: %s" % import_result.output) + + self.increment_package_pr(package_name) + pr_2 = self.get_pr_version(package_name) + + bitbake("-ccleansstate %s" % package_name) + self.assertTrue(pr_2 - pr_1 == 1) + + @testcase(930) + def test_import_export_replace_db(self): + self.run_test_pr_export_import('m4') + + @testcase(931) + def test_import_export_override_db(self): + self.run_test_pr_export_import('m4', replace_current_db=False) + + @testcase(932) + def test_pr_service_rpm_arch_dep(self): + self.run_test_pr_service('m4', 'rpm', 'do_package') + + @testcase(934) + def test_pr_service_deb_arch_dep(self): + self.run_test_pr_service('m4', 'deb', 'do_package') + + @testcase(933) + def test_pr_service_ipk_arch_dep(self): + self.run_test_pr_service('m4', 'ipk', 'do_package') + + @testcase(935) + def test_pr_service_rpm_arch_indep(self): + self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package') + + @testcase(937) + def test_pr_service_deb_arch_indep(self): + self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package') + + @testcase(936) + def test_pr_service_ipk_arch_indep(self): + self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package') diff --git a/meta/lib/oeqa/selftest/sstate.py b/meta/lib/oeqa/selftest/sstate.py new file mode 100644 index 0000000000..5989724432 --- /dev/null +++ b/meta/lib/oeqa/selftest/sstate.py @@ -0,0 +1,53 @@ +import datetime +import unittest +import os +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base 
import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer + + +class SStateBase(oeSelfTest): + + def setUpLocal(self): + self.temp_sstate_location = None + self.sstate_path = get_bb_var('SSTATE_DIR') + self.distro = get_bb_var('NATIVELSBSTRING') + self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro) + + # Creates a special sstate configuration with the option to add sstate mirrors + def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]): + self.temp_sstate_location = temp_sstate_location + + if self.temp_sstate_location: + temp_sstate_path = os.path.join(self.builddir, "temp_sstate_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S')) + config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path + self.append_config(config_temp_sstate) + self.track_for_cleanup(temp_sstate_path) + self.sstate_path = get_bb_var('SSTATE_DIR') + self.distro = get_bb_var('NATIVELSBSTRING') + self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro) + + if add_local_mirrors: + config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""' + self.append_config(config_set_sstate_if_not_set) + for local_mirror in add_local_mirrors: + self.assertFalse(os.path.join(local_mirror) == os.path.join(self.sstate_path), msg='Cannot add the current sstate path as a sstate mirror') + config_sstate_mirror = "SSTATE_MIRRORS += \"file://.* file:///%s/PATH\"" % local_mirror + self.append_config(config_sstate_mirror) + + # Returns a list containing sstate files + def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True): + result = [] + for root, dirs, files in os.walk(self.sstate_path): + if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.distro, root): + for f in files: + if re.search(filename_regex, f): + result.append(f) + if distro_nonspecific and re.search("%s/[a-z0-9]{2}$" % self.sstate_path, root): + for f in files: + if re.search(filename_regex, f): + result.append(f) + 
return result diff --git a/meta/lib/oeqa/selftest/sstatetests.py b/meta/lib/oeqa/selftest/sstatetests.py new file mode 100644 index 0000000000..d578ddd489 --- /dev/null +++ b/meta/lib/oeqa/selftest/sstatetests.py @@ -0,0 +1,204 @@ +import datetime +import unittest +import os +import re +import shutil + +import oeqa.utils.ftools as ftools +from oeqa.selftest.base import oeSelfTest +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer +from oeqa.selftest.sstate import SStateBase +from oeqa.utils.decorators import testcase + +class SStateTests(SStateBase): + + # Test sstate files creation and their location + def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True): + self.config_sstate(temp_sstate_location) + + if self.temp_sstate_location: + bitbake(['-cclean'] + targets) + else: + bitbake(['-ccleansstate'] + targets) + + bitbake(targets) + file_tracker = self.search_sstate('|'.join(map(str, targets)), distro_specific, distro_nonspecific) + if should_pass: + self.assertTrue(file_tracker , msg="Could not find sstate files for: %s" % ', '.join(map(str, targets))) + else: + self.assertTrue(not file_tracker , msg="Found sstate files in the wrong place for: %s" % ', '.join(map(str, targets))) + + @testcase(975) + def test_sstate_creation_distro_specific_pass(self): + targetarch = get_bb_var('TUNE_ARCH') + self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True) + + @testcase(975) + def test_sstate_creation_distro_specific_fail(self): + targetarch = get_bb_var('TUNE_ARCH') + self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False) + + @testcase(976) + def test_sstate_creation_distro_nonspecific_pass(self): + self.run_test_sstate_creation(['glibc-initial'], 
distro_specific=False, distro_nonspecific=True, temp_sstate_location=True) + + @testcase(976) + def test_sstate_creation_distro_nonspecific_fail(self): + self.run_test_sstate_creation(['glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False) + + + # Test the sstate files deletion part of the do_cleansstate task + def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True): + self.config_sstate(temp_sstate_location) + + bitbake(['-ccleansstate'] + targets) + + bitbake(targets) + tgz_created = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific) + self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s" % ', '.join(map(str, targets))) + + siginfo_created = self.search_sstate('|'.join(map(str, [s + '.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific) + self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s" % ', '.join(map(str, targets))) + + bitbake(['-ccleansstate'] + targets) + tgz_removed = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific) + self.assertTrue(not tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s" % ', '.join(map(str, targets))) + + @testcase(977) + def test_cleansstate_task_distro_specific_nonspecific(self): + targetarch = get_bb_var('TUNE_ARCH') + self.run_test_cleansstate_task(['binutils-cross-' + targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=True, temp_sstate_location=True) + + @testcase(977) + def test_cleansstate_task_distro_nonspecific(self): + self.run_test_cleansstate_task(['glibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True) + + @testcase(977) + def test_cleansstate_task_distro_specific(self): + targetarch = get_bb_var('TUNE_ARCH') + 
self.run_test_cleansstate_task(['binutils-cross-'+ targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True) + + + # Test rebuilding of distro-specific sstate files + def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True): + self.config_sstate(temp_sstate_location) + + bitbake(['-ccleansstate'] + targets) + + bitbake(targets) + self.assertTrue(self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True) == [], msg="Found distro non-specific sstate for: %s" % ', '.join(map(str, targets))) + file_tracker_1 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False) + self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets))) + + self.track_for_cleanup(self.distro_specific_sstate + "_old") + shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old") + shutil.rmtree(self.distro_specific_sstate) + + bitbake(['-cclean'] + targets) + bitbake(targets) + file_tracker_2 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False) + self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets))) + + not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2] + self.assertTrue(not_recreated == [], msg="The following sstate files ware not recreated: %s" % ', '.join(map(str, not_recreated))) + + created_once = [x for x in file_tracker_2 if x not in file_tracker_1] + self.assertTrue(created_once == [], msg="The following sstate files ware created only in the second run: %s" % ', '.join(map(str, created_once))) + + @testcase(175) + def test_rebuild_distro_specific_sstate_cross_native_targets(self): + targetarch = 
get_bb_var('TUNE_ARCH') + self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch, 'binutils-native'], temp_sstate_location=True) + + @testcase(175) + def test_rebuild_distro_specific_sstate_cross_target(self): + targetarch = get_bb_var('TUNE_ARCH') + self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch], temp_sstate_location=True) + + @testcase(175) + def test_rebuild_distro_specific_sstate_native_target(self): + self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True) + + + # Test the sstate-cache-management script. Each element in the global_config list is used with the corresponding element in the target_config list + # global_config elements are expected to not generate any sstate files that would be removed by sstate-cache-management.sh (such as changing the value of MACHINE) + def run_test_sstate_cache_management_script(self, target, global_config=[''], target_config=[''], ignore_patterns=[]): + self.assertTrue(global_config) + self.assertTrue(target_config) + self.assertTrue(len(global_config) == len(target_config), msg='Lists global_config and target_config should have the same number of elements') + self.config_sstate(temp_sstate_location=True, add_local_mirrors=[self.sstate_path]) + + # If buildhistory is enabled, we need to disable version-going-backwards QA checks for this test. It may report errors otherwise. + if ('buildhistory' in get_bb_var('USER_CLASSES')) or ('buildhistory' in get_bb_var('INHERIT')): + remove_errors_config = 'ERROR_QA_remove = "version-going-backwards"' + self.append_config(remove_errors_config) + + # For not this only checks if random sstate tasks are handled correctly as a group. + # In the future we should add control over what tasks we check for. 
+ + sstate_archs_list = [] + expected_remaining_sstate = [] + for idx in range(len(target_config)): + self.append_config(global_config[idx]) + self.append_recipeinc(target, target_config[idx]) + sstate_arch = get_bb_var('SSTATE_PKGARCH', target) + if not sstate_arch in sstate_archs_list: + sstate_archs_list.append(sstate_arch) + if target_config[idx] == target_config[-1]: + target_sstate_before_build = self.search_sstate(target + '.*?\.tgz$') + bitbake("-cclean %s" % target) + result = bitbake(target, ignore_status=True) + if target_config[idx] == target_config[-1]: + target_sstate_after_build = self.search_sstate(target + '.*?\.tgz$') + expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)] + self.remove_config(global_config[idx]) + self.remove_recipeinc(target, target_config[idx]) + self.assertEqual(result.status, 0) + + runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list)))) + actual_remaining_sstate = [x for x in self.search_sstate(target + '.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)] + + actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate] + self.assertFalse(actual_not_expected, msg="Files should have been removed but ware not: %s" % ', '.join(map(str, actual_not_expected))) + expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate] + self.assertFalse(expected_not_actual, msg="Extra files ware removed: %s" ', '.join(map(str, expected_not_actual))) + + @testcase(973) + def test_sstate_cache_management_script_using_pr_1(self): + global_config = [] + target_config = [] + global_config.append('') + target_config.append('PR = "0"') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) + + 
@testcase(978) + def test_sstate_cache_management_script_using_pr_2(self): + global_config = [] + target_config = [] + global_config.append('') + target_config.append('PR = "0"') + global_config.append('') + target_config.append('PR = "1"') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) + + @testcase(979) + def test_sstate_cache_management_script_using_pr_3(self): + global_config = [] + target_config = [] + global_config.append('MACHINE = "qemux86-64"') + target_config.append('PR = "0"') + global_config.append(global_config[0]) + target_config.append('PR = "1"') + global_config.append('MACHINE = "qemux86"') + target_config.append('PR = "1"') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) + + @testcase(974) + def test_sstate_cache_management_script_using_machine(self): + global_config = [] + target_config = [] + global_config.append('MACHINE = "qemux86-64"') + target_config.append('') + global_config.append('MACHINE = "qemux86"') + target_config.append('') + self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) diff --git a/meta/lib/oeqa/targetcontrol.py b/meta/lib/oeqa/targetcontrol.py new file mode 100644 index 0000000000..cc582dd1ad --- /dev/null +++ b/meta/lib/oeqa/targetcontrol.py @@ -0,0 +1,199 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# This module is used by testimage.bbclass for setting up and controlling a target machine. 
+ +import os +import shutil +import subprocess +import bb +import traceback +import sys +from oeqa.utils.sshcontrol import SSHControl +from oeqa.utils.qemurunner import QemuRunner +from oeqa.controllers.testtargetloader import TestTargetLoader +from abc import ABCMeta, abstractmethod + +def get_target_controller(d): + testtarget = d.getVar("TEST_TARGET", True) + # old, simple names + if testtarget == "qemu": + return QemuTarget(d) + elif testtarget == "simpleremote": + return SimpleRemoteTarget(d) + else: + # use the class name + try: + # is it a core class defined here? + controller = getattr(sys.modules[__name__], testtarget) + except AttributeError: + # nope, perhaps a layer defined one + try: + bbpath = d.getVar("BBPATH", True).split(':') + testtargetloader = TestTargetLoader() + controller = testtargetloader.get_controller_module(testtarget, bbpath) + except ImportError as e: + bb.fatal("Failed to import {0} from available controller modules:\n{1}".format(testtarget,traceback.format_exc())) + except AttributeError as e: + bb.fatal("Invalid TEST_TARGET - " + str(e)) + return controller(d) + + +class BaseTarget(object): + + __metaclass__ = ABCMeta + + supported_image_fstypes = [] + + def __init__(self, d): + self.connection = None + self.ip = None + self.server_ip = None + self.datetime = d.getVar('DATETIME', True) + self.testdir = d.getVar("TEST_LOG_DIR", True) + self.pn = d.getVar("PN", True) + + @abstractmethod + def deploy(self): + + self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime) + sshloglink = os.path.join(self.testdir, "ssh_target_log") + if os.path.islink(sshloglink): + os.unlink(sshloglink) + os.symlink(self.sshlog, sshloglink) + bb.note("SSH log file: %s" % self.sshlog) + + @abstractmethod + def start(self, params=None): + pass + + @abstractmethod + def stop(self): + pass + + @classmethod + def get_extra_files(self): + return None + + @classmethod + def match_image_fstype(self, d, image_fstypes=None): + if not 
image_fstypes: + image_fstypes = d.getVar('IMAGE_FSTYPES', True).split(' ') + possible_image_fstypes = [fstype for fstype in self.supported_image_fstypes if fstype in image_fstypes] + if possible_image_fstypes: + return possible_image_fstypes[0] + else: + return None + + def get_image_fstype(self, d): + image_fstype = self.match_image_fstype(d) + if image_fstype: + return image_fstype + else: + bb.fatal("IMAGE_FSTYPES should contain a Target Controller supported image fstype: %s " % ', '.join(map(str, self.supported_image_fstypes))) + + def restart(self, params=None): + self.stop() + self.start(params) + + def run(self, cmd, timeout=None): + return self.connection.run(cmd, timeout) + + def copy_to(self, localpath, remotepath): + return self.connection.copy_to(localpath, remotepath) + + def copy_from(self, remotepath, localpath): + return self.connection.copy_from(remotepath, localpath) + + + +class QemuTarget(BaseTarget): + + supported_image_fstypes = ['ext3'] + + def __init__(self, d): + + super(QemuTarget, self).__init__(d) + + self.image_fstype = self.get_image_fstype(d) + self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime) + self.origrootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.' + self.image_fstype) + self.rootfs = os.path.join(self.testdir, d.getVar("IMAGE_LINK_NAME", True) + '-testimage.' 
+ self.image_fstype) + + self.runner = QemuRunner(machine=d.getVar("MACHINE", True), + rootfs=self.rootfs, + tmpdir = d.getVar("TMPDIR", True), + deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True), + display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True), + logfile = self.qemulog, + boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True))) + + def deploy(self): + try: + shutil.copyfile(self.origrootfs, self.rootfs) + except Exception as e: + bb.fatal("Error copying rootfs: %s" % e) + + qemuloglink = os.path.join(self.testdir, "qemu_boot_log") + if os.path.islink(qemuloglink): + os.unlink(qemuloglink) + os.symlink(self.qemulog, qemuloglink) + + bb.note("rootfs file: %s" % self.rootfs) + bb.note("Qemu log file: %s" % self.qemulog) + super(QemuTarget, self).deploy() + + def start(self, params=None): + if self.runner.start(params): + self.ip = self.runner.ip + self.server_ip = self.runner.server_ip + self.connection = SSHControl(ip=self.ip, logfile=self.sshlog) + else: + self.stop() + raise bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % self.pn) + + def stop(self): + self.runner.stop() + self.connection = None + self.ip = None + self.server_ip = None + + def restart(self, params=None): + if self.runner.restart(params): + self.ip = self.runner.ip + self.server_ip = self.runner.server_ip + self.connection = SSHControl(ip=self.ip, logfile=self.sshlog) + else: + raise bb.build.FuncFailed("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn) + + +class SimpleRemoteTarget(BaseTarget): + + def __init__(self, d): + super(SimpleRemoteTarget, self).__init__(d) + addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.') + self.ip = addr.split(":")[0] + try: + self.port = addr.split(":")[1] + except IndexError: + self.port = None + bb.note("Target IP: %s" % self.ip) + self.server_ip = d.getVar("TEST_SERVER_IP", True) 
# Borrowed from CalledProcessError

class CommandError(Exception):
    """Raised when a host command exits with a non-zero status.

    Carries the exit code, the command that was run, and its captured
    output, mirroring subprocess.CalledProcessError.
    """

    def __init__(self, retcode, cmd, output=None):
        self.retcode, self.cmd, self.output = retcode, cmd, output

    def __str__(self):
        return ("Command '%s' returned non-zero exit status %d with output: %s"
                % (self.cmd, self.retcode, self.output))
+ + + +import os +import sys +import signal +import subprocess +import threading +import logging +from oeqa.utils import CommandError +from oeqa.utils import ftools + +class Command(object): + def __init__(self, command, bg=False, timeout=None, data=None, **options): + + self.defaultopts = { + "stdout": subprocess.PIPE, + "stderr": subprocess.STDOUT, + "stdin": None, + "shell": False, + "bufsize": -1, + } + + self.cmd = command + self.bg = bg + self.timeout = timeout + self.data = data + + self.options = dict(self.defaultopts) + if isinstance(self.cmd, basestring): + self.options["shell"] = True + if self.data: + self.options['stdin'] = subprocess.PIPE + self.options.update(options) + + self.status = None + self.output = None + self.error = None + self.thread = None + + self.log = logging.getLogger("utils.commands") + + def run(self): + self.process = subprocess.Popen(self.cmd, **self.options) + + def commThread(): + self.output, self.error = self.process.communicate(self.data) + + self.thread = threading.Thread(target=commThread) + self.thread.start() + + self.log.debug("Running command '%s'" % self.cmd) + + if not self.bg: + self.thread.join(self.timeout) + self.stop() + + def stop(self): + if self.thread.isAlive(): + self.process.terminate() + # let's give it more time to terminate gracefully before killing it + self.thread.join(5) + if self.thread.isAlive(): + self.process.kill() + self.thread.join() + + self.output = self.output.rstrip() + self.status = self.process.poll() + + self.log.debug("Command '%s' returned %d as exit code." 
def get_bb_var(var, target=None, postconfig=None):
    """Return the value of bitbake variable *var*, or None if unset.

    Parses the output of 'bitbake -e' (optionally for a specific *target*
    recipe, with *postconfig* extra configuration applied) looking for a
    'VAR=...' assignment line.
    """
    bbenv = get_bb_env(target, postconfig=postconfig)
    for line in bbenv.splitlines():
        if line.startswith(var + "="):
            # Bug fix: split on the FIRST '=' only. Values may themselves
            # contain '=' (e.g. PATH-like settings); split('=')[1] silently
            # truncated everything after the second '='.
            val = line.split('=', 1)[1]
            return val.replace('\"', '')
    return None
None + for l in layers: + if "/meta-selftest" in l and os.path.isdir(l): + testlayer = l + break + return testlayer diff --git a/meta/lib/oeqa/utils/decorators.py b/meta/lib/oeqa/utils/decorators.py new file mode 100644 index 0000000000..40bd4ef2db --- /dev/null +++ b/meta/lib/oeqa/utils/decorators.py @@ -0,0 +1,158 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# Some custom decorators that can be used by unittests +# Most useful is skipUnlessPassed which can be used for +# creating dependecies between two test methods. + +import os +import logging +import sys +import unittest + +#get the "result" object from one of the upper frames provided that one of these upper frames is a unittest.case frame +class getResults(object): + def __init__(self): + #dynamically determine the unittest.case frame and use it to get the name of the test method + upperf = sys._current_frames().values()[0] + while (upperf.f_globals['__name__'] != 'unittest.case'): + upperf = upperf.f_back + + def handleList(items): + ret = [] + # items is a list of tuples, (test, failure) or (_ErrorHandler(), Exception()) + for i in items: + s = i[0].id() + #Handle the _ErrorHolder objects from skipModule failures + if "setUpModule (" in s: + ret.append(s.replace("setUpModule (", "").replace(")","")) + else: + ret.append(s) + return ret + self.faillist = handleList(upperf.f_locals['result'].failures) + self.errorlist = handleList(upperf.f_locals['result'].errors) + self.skiplist = handleList(upperf.f_locals['result'].skipped) + + def getFailList(self): + return self.faillist + + def getErrorList(self): + return self.errorlist + + def getSkipList(self): + return self.skiplist + +class skipIfFailure(object): + + def __init__(self,testcase): + self.testcase = testcase + + def __call__(self,f): + def wrapped_f(*args): + res = getResults() + if self.testcase in (res.getFailList() or res.getErrorList()): + raise unittest.SkipTest("Testcase dependency not 
class skipIfSkipped(object):
    """Decorator: skip this test if the named dependency testcase was skipped."""

    def __init__(self, testcase):
        self.testcase = testcase

    def __call__(self, f):
        def wrapped_f(*args):
            res = getResults()
            if self.testcase in res.getSkipList():
                raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
            return f(*args)
        wrapped_f.__name__ = f.__name__
        return wrapped_f

class skipUnlessPassed(object):
    """Decorator: skip this test unless the named dependency testcase passed
    (i.e. it is in none of the skip, fail, or error lists)."""

    def __init__(self, testcase):
        self.testcase = testcase

    def __call__(self, f):
        def wrapped_f(*args):
            res = getResults()
            if self.testcase in res.getSkipList() or \
                    self.testcase in res.getFailList() or \
                    self.testcase in res.getErrorList():
                raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
            return f(*args)
        wrapped_f.__name__ = f.__name__
        return wrapped_f

class testcase(object):
    """Decorator: tag a test method with a testcase id (read by LogResults)."""

    def __init__(self, test_case):
        self.test_case = test_case

    def __call__(self, func):
        def wrapped_f(*args):
            return func(*args)
        wrapped_f.test_case = self.test_case
        # Consistency fix: preserve the wrapped function's name, as the
        # skip* decorators in this module already do; previously the
        # decorated method reported its name as 'wrapped_f'.
        wrapped_f.__name__ = func.__name__
        return wrapped_f

class NoParsingFilter(logging.Filter):
    """Logging filter that passes only the custom RESULTS level (100) records."""

    def filter(self, record):
        return record.levelno == 100
+ custom_log_level = 100 + logging.addLevelName(custom_log_level, 'RESULTS') + caller = os.path.basename(sys.argv[0]) + + def results(self, message, *args, **kws): + if self.isEnabledFor(custom_log_level): + self.log(custom_log_level, message, *args, **kws) + logging.Logger.results = results + + logging.basicConfig(filename=os.path.join(os.getcwd(),'results-'+caller+'.log'), + filemode='w', + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + datefmt='%H:%M:%S', + level=custom_log_level) + for handler in logging.root.handlers: + handler.addFilter(NoParsingFilter()) + local_log = logging.getLogger(caller) + + #check status of tests and record it + for (name, msg) in result.errors: + if self._testMethodName == str(name).split(' ')[0]: + local_log.results("Testcase "+str(test_case)+": ERROR") + local_log.results("Testcase "+str(test_case)+":\n"+msg) + passed = False + for (name, msg) in result.failures: + if self._testMethodName == str(name).split(' ')[0]: + local_log.results("Testcase "+str(test_case)+": FAILED") + local_log.results("Testcase "+str(test_case)+":\n"+msg) + passed = False + for (name, msg) in result.skipped: + if self._testMethodName == str(name).split(' ')[0]: + local_log.results("Testcase "+str(test_case)+": SKIPPED") + passed = False + if passed: + local_log.results("Testcase "+str(test_case)+": PASSED") + + original_class.run = run + return original_class diff --git a/meta/lib/oeqa/utils/ftools.py b/meta/lib/oeqa/utils/ftools.py new file mode 100644 index 0000000000..64ebe3d217 --- /dev/null +++ b/meta/lib/oeqa/utils/ftools.py @@ -0,0 +1,27 @@ +import os +import re + +def write_file(path, data): + wdata = data.rstrip() + "\n" + with open(path, "w") as f: + f.write(wdata) + +def append_file(path, data): + wdata = data.rstrip() + "\n" + with open(path, "a") as f: + f.write(wdata) + +def read_file(path): + data = None + with open(path) as f: + data = f.read() + return data + +def remove_from_file(path, data): + lines = 
read_file(path).splitlines() + rmdata = data.strip().splitlines() + for l in rmdata: + for c in range(0, lines.count(l)): + i = lines.index(l) + del(lines[i]) + write_file(path, "\n".join(lines)) diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py new file mode 100644 index 0000000000..76518d8ef9 --- /dev/null +++ b/meta/lib/oeqa/utils/httpserver.py @@ -0,0 +1,35 @@ +import SimpleHTTPServer +import multiprocessing +import os + +class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer): + + def server_start(self, root_dir): + import signal + signal.signal(signal.SIGTERM, signal.SIG_DFL) + os.chdir(root_dir) + self.serve_forever() + +class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): + + def log_message(self, format_str, *args): + pass + +class HTTPService(object): + + def __init__(self, root_dir, host=''): + self.root_dir = root_dir + self.host = host + self.port = 0 + + def start(self): + self.server = HTTPServer((self.host, self.port), HTTPRequestHandler) + if self.port == 0: + self.port = self.server.server_port + self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir]) + self.process.start() + + def stop(self): + self.server.server_close() + self.process.terminate() + self.process.join() diff --git a/meta/lib/oeqa/utils/logparser.py b/meta/lib/oeqa/utils/logparser.py new file mode 100644 index 0000000000..87b50354cd --- /dev/null +++ b/meta/lib/oeqa/utils/logparser.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python + +import sys +import os +import re +import ftools + + +# A parser that can be used to identify weather a line is a test result or a section statement. 
class Lparser(object):
    """Classify log lines as test results or section markers.

    Categories are defined by regex keyword arguments named
    test_<category>_<status>_regex and section_<category>_<status>_regex;
    extra categories beyond the default '0' can be supplied via **kwargs.
    """

    def __init__(self, test_0_pass_regex, test_0_fail_regex, section_0_begin_regex=None, section_0_end_regex=None, **kwargs):
        # Initialize the arguments dictionary
        if kwargs:
            self.args = kwargs
        else:
            self.args = {}

        # Add the default args to the dictionary
        self.args['test_0_pass_regex'] = test_0_pass_regex
        self.args['test_0_fail_regex'] = test_0_fail_regex
        if section_0_begin_regex:
            self.args['section_0_begin_regex'] = section_0_begin_regex
        if section_0_end_regex:
            self.args['section_0_end_regex'] = section_0_end_regex

        self.test_possible_status = ['pass', 'fail', 'error']
        self.section_possible_status = ['begin', 'end']

        self.initialized = False


    # Initialize the parser with the current configuration
    def init(self):
        """Compile all configured regexes; must be called before parse_line().

        Raises Exception for empty regex values or argument names that do
        not match the expected <type>_<category>_<status>_regex pattern.
        """
        # extra arguments can be added by the user to define new test and
        # section categories. They must follow the pre-defined pattern:
        # <type>_<category>_<status>_regex
        self.test_argument_pattern = "^test_(.+?)_(%s)_regex" % '|'.join(map(str, self.test_possible_status))
        self.section_argument_pattern = "^section_(.+?)_(%s)_regex" % '|'.join(map(str, self.section_possible_status))

        # Initialize the test and section regex dictionaries
        self.test_regex = {}
        self.section_regex = {}

        for arg, value in self.args.items():
            if not value:
                # BUGFIX: the original formatted this message with the
                # undefined name 'key', raising NameError instead of the
                # intended Exception; use 'arg'.
                raise Exception('The value of provided argument %s is %s. Should have a valid value.' % (arg, value))
            is_test = re.search(self.test_argument_pattern, arg)
            is_section = re.search(self.section_argument_pattern, arg)
            if is_test:
                if not is_test.group(1) in self.test_regex:
                    self.test_regex[is_test.group(1)] = {}
                self.test_regex[is_test.group(1)][is_test.group(2)] = re.compile(value)
            elif is_section:
                if not is_section.group(1) in self.section_regex:
                    self.section_regex[is_section.group(1)] = {}
                self.section_regex[is_section.group(1)][is_section.group(2)] = re.compile(value)
            else:
                # TODO: Make these call a traceback instead of a simple exception..
                raise Exception("The provided argument name does not correspond to any valid type. Please give one of the following types:\nfor tests: %s\nfor sections: %s" % (self.test_argument_pattern, self.section_argument_pattern))

        self.initialized = True

    # Parse a line and return a tuple containing the type of result
    # (test/section) and its category, status and name
    def parse_line(self, line):
        """Return ['test'|'section', category, status, name] for a matching
        line, or None when no configured regex matches."""
        if not self.initialized:
            raise Exception("The parser is not initialized..")

        for test_category, test_status_list in self.test_regex.items():
            for test_status, status_regex in test_status_list.items():
                test_name = status_regex.search(line)
                if test_name:
                    return ['test', test_category, test_status, test_name.group(1)]

        for section_category, section_status_list in self.section_regex.items():
            for section_status, status_regex in section_status_list.items():
                section_name = status_regex.search(line)
                if section_name:
                    return ['section', section_category, section_status, section_name.group(1)]
        return None


class Result(object):
    """Accumulate (test, status) pairs grouped by section."""

    def __init__(self):
        self.result_dict = {}

    def store(self, section, test, status):
        if not section in self.result_dict:
            self.result_dict[section] = []

        self.result_dict[section].append((test, status))

    # sort tests by the test name (the first element of the tuple) for each
    # section. This can be helpful when using git to diff for changes by
    # making sure they are always in the same order.
    def sort_tests(self):
        for package in self.result_dict:
            sorted_results = sorted(self.result_dict[package], key=lambda tup: tup[0])
            self.result_dict[package] = sorted_results

    # Log the results as files. The file name is the section name and the
    # contents are the tests in that section.
    def log_as_files(self, target_dir, test_status):
        """Write one file per section under target_dir, containing only the
        tests whose status is listed in `test_status`."""
        # BUGFIX: validate test_status BEFORE using it; the original built
        # the regex from it first, so the type check could never fire for
        # an invalid value.
        if not isinstance(test_status, list):
            raise Exception("test_status should be a list. Got " + str(test_status) + " instead.")
        if not os.path.exists(target_dir):
            raise Exception("Target directory does not exist: %s" % target_dir)
        status_regex = re.compile('|'.join(map(str, test_status)))

        for section, test_results in self.result_dict.items():
            prefix = ''
            for x in test_status:
                prefix += x + '.'
            if (section != ''):
                prefix += section
            section_file = os.path.join(target_dir, prefix)
            # purge the file contents if it exists
            open(section_file, 'w').close()
            for test_result in test_results:
                (test_name, status) = test_result
                # we log only the tests with status in the test_status list
                match_status = status_regex.search(status)
                if match_status:
                    ftools.append_file(section_file, status + ": " + test_name)

    # Not yet implemented!
    def log_to_lava(self):
        pass

# meta/lib/oeqa/utils/qemurunner.py
#
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)

# This module provides a class for starting qemu images using runqemu.
# It's used by testimage.bbclass.
import subprocess
import os
import time
import signal
import re
import socket
import select
import bb

class QemuRunner:
    """Start a qemu image via the 'runqemu' script and wait for it to boot.

    Used by testimage.bbclass. The qemu serial console is redirected to a
    local TCP socket so the boot output can be captured and the login
    banner detected.
    """

    def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime):

        # Popen object for runqemu
        self.runqemu = None
        # pid of the qemu process that runqemu will start
        self.qemupid = None
        # target ip - from the command line
        self.ip = None
        # host ip - where qemu is running
        self.server_ip = None

        self.machine = machine
        self.rootfs = rootfs
        self.display = display
        self.tmpdir = tmpdir
        self.deploy_dir_image = deploy_dir_image
        self.logfile = logfile
        self.boottime = boottime

        # seconds to wait for the qemu process itself to appear
        self.runqemutime = 60

        self.create_socket()


    def create_socket(self):
        """Create the listening socket qemu's serial console connects to."""
        self.bootlog = ''
        self.qemusock = None

        try:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.setblocking(0)
            # Port 0: let the kernel choose a free port, read it back below.
            self.server_socket.bind(("127.0.0.1", 0))
            self.server_socket.listen(2)
            self.serverport = self.server_socket.getsockname()[1]
            bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
        except socket.error as msg:
            # FIX: 'as' binding instead of the legacy 'except E, v' syntax
            # (identical behaviour on Python 2.6+, forward-compatible).
            self.server_socket.close()
            bb.fatal("Failed to create listening socket: %s" % msg[1])


    def log(self, msg):
        # Append raw console data to the boot log file, if configured.
        if self.logfile:
            with open(self.logfile, "a") as f:
                f.write("%s" % msg)

    def start(self, qemuparams = None):
        """Launch runqemu and wait for the image to reach the login banner.

        Returns True when qemu is up and its IP addresses were determined,
        False on any failure (details are reported via bb.error/bb.note).
        """
        if self.display:
            os.environ["DISPLAY"] = self.display
        else:
            bb.error("To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
            return False
        if not os.path.exists(self.rootfs):
            bb.error("Invalid rootfs %s" % self.rootfs)
            return False
        if not os.path.exists(self.tmpdir):
            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        # Set this flag so that Qemu doesn't do any grabs as SDL grabs
        # interact badly with screensavers.
        os.environ["QEMU_DONT_GRAB"] = "1"
        self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
        if qemuparams:
            # Splice the extra parameters inside the closing quote of
            # qemuparams="...".
            self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'

        launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs, self.qemuparams)
        # os.setpgrp puts runqemu and its children into their own process
        # group so stop() can signal the whole group at once.
        self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setpgrp)

        bb.note("runqemu started, pid is %s" % self.runqemu.pid)
        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
        endtime = time.time() + self.runqemutime
        while not self.is_alive() and time.time() < endtime:
            time.sleep(1)

        if self.is_alive():
            # typo fix: "procces" -> "process"
            bb.note("qemu started - qemu process pid is %s" % self.qemupid)
            cmdline = ''
            with open('/proc/%s/cmdline' % self.qemupid) as p:
                cmdline = p.read()
            # runqemu passes ip=<target>::<server>:... on the kernel
            # command line; expect exactly three dotted quads after "ip=".
            ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
            if not ips or len(ips) != 3:
                bb.note("Couldn't get ip from qemu process arguments! Here is the qemu command line used: %s" % cmdline)
                self.stop()
                return False
            else:
                self.ip = ips[0]
                self.server_ip = ips[1]
            bb.note("Target IP: %s" % self.ip)
            bb.note("Server IP: %s" % self.server_ip)
            bb.note("Waiting at most %d seconds for login banner" % self.boottime)
            endtime = time.time() + self.boottime
            socklist = [self.server_socket]
            reachedlogin = False
            stopread = False
            while time.time() < endtime and not stopread:
                sread, swrite, serror = select.select(socklist, [], [], 5)
                for sock in sread:
                    if sock is self.server_socket:
                        # qemu's serial console connected; stop listening
                        # and start reading from the accepted socket.
                        self.qemusock, addr = self.server_socket.accept()
                        self.qemusock.setblocking(0)
                        socklist.append(self.qemusock)
                        socklist.remove(self.server_socket)
                        bb.note("Connection from %s:%s" % addr)
                    else:
                        data = sock.recv(1024)
                        if data:
                            self.log(data)
                            self.bootlog += data
                            if re.search("qemu.* login:", self.bootlog):
                                stopread = True
                                reachedlogin = True
                                bb.note("Reached login banner")
                        else:
                            # EOF on the serial console socket
                            socklist.remove(sock)
                            sock.close()
                            stopread = True

            if not reachedlogin:
                # typo fix: "didn't reached login boot" -> readable message
                bb.note("Target didn't reach login banner in %d seconds" % self.boottime)
                lines = "\n".join(self.bootlog.splitlines()[-5:])
                bb.note("Last 5 lines of text:\n%s" % lines)
                bb.note("Check full boot log: %s" % self.logfile)
                self.stop()
                return False
        else:
            # typo fix: "didn't appeared" -> "didn't appear"
            bb.note("Qemu pid didn't appear in %s seconds" % self.runqemutime)
            output = self.runqemu.stdout
            self.stop()
            bb.note("Output from runqemu:\n%s" % output.read())
            return False

        return self.is_alive()

    def stop(self):
        """Terminate runqemu (its whole process group) and close sockets."""
        if self.runqemu:
            bb.note("Sending SIGTERM to runqemu")
            os.killpg(self.runqemu.pid, signal.SIGTERM)
            endtime = time.time() + self.runqemutime
            while self.runqemu.poll() is None and time.time() < endtime:
                time.sleep(1)
            if self.runqemu.poll() is None:
                bb.note("Sending SIGKILL to runqemu")
                os.killpg(self.runqemu.pid, signal.SIGKILL)
            self.runqemu = None
        if self.server_socket:
            self.server_socket.close()
            self.server_socket = None
        self.qemupid = None
        self.ip = None

    def restart(self, qemuparams = None):
        """Stop qemu if it is still running and start it again."""
        bb.note("Restarting qemu process")
        if self.runqemu.poll() is None:
            self.stop()
        self.create_socket()
        if self.start(qemuparams):
            return True
        return False

    def is_alive(self):
        """True if the qemu-system process started by runqemu is running."""
        qemu_child = self.find_child(str(self.runqemu.pid))
        if qemu_child:
            self.qemupid = qemu_child[0]
            if os.path.exists("/proc/" + str(self.qemupid)):
                return True
        return False

    def find_child(self, parent_pid):
        #
        # Walk the process tree from the process specified looking for a
        # qemu-system. Return its [pid, cmd], or [] when none is found.
        #
        ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
        processes = ps.split('\n')
        nfields = len(processes[0].split()) - 1
        pids = {}
        commands = {}
        for row in processes[1:]:
            data = row.split(None, nfields)
            if len(data) != 3:
                continue
            if data[1] not in pids:
                pids[data[1]] = []

            pids[data[1]].append(data[0])
            commands[data[0]] = data[2]

        if parent_pid not in pids:
            return []

        # Breadth-first expansion of the descendant set of parent_pid.
        parents = []
        newparents = pids[parent_pid]
        while newparents:
            # renamed from 'next' to avoid shadowing the builtin
            nextlevel = []
            for p in newparents:
                if p in pids:
                    for n in pids[p]:
                        if n not in parents and n not in nextlevel:
                            nextlevel.append(n)
                if p not in parents:
                    parents.append(p)
            newparents = nextlevel
        for p in parents:
            # Need to be careful here since runqemu-internal runs "ldd qemu-system-xxxx"
            # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
            basecmd = commands[p].split()[0]
            basecmd = os.path.basename(basecmd)
            if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
                return [int(p), commands[p]]
        # BUGFIX: return an empty list instead of implicitly returning None
        # when no qemu-system child is found, matching the documented
        # return type (callers only truth-test it, so behaviour is safe).
        return []
# meta/lib/oeqa/utils/sshcontrol.py
#
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)

# Provides a class for setting up ssh connections,
# running commands and copying files to/from a target.
# It's used by testimage.bbclass and tests in lib/oeqa/runtime.

import subprocess
import time
import os
import select


class SSHProcess(object):
    """Run a single subprocess, optionally killing it when output stalls.

    The timeout is a *no-output* timeout, not a bound on total runtime:
    the deadline is pushed forward every time the child produces data.
    """

    def __init__(self, **options):
        # Defaults merge stderr into stdout and start the child in its own
        # session so it can be terminated as a group.
        self.defaultopts = {
            "stdout": subprocess.PIPE,
            "stderr": subprocess.STDOUT,
            "stdin": None,
            "shell": False,
            "bufsize": -1,
            "preexec_fn": os.setsid,
        }
        self.options = dict(self.defaultopts)
        self.options.update(options)
        self.status = None
        self.output = None
        self.process = None
        self.starttime = None
        self.logfile = None

    def log(self, msg):
        if self.logfile:
            with open(self.logfile, "a") as f:
                f.write("%s" % msg)

    def run(self, command, timeout=None, logfile=None):
        """Run `command`; return (exit status, stripped output)."""
        self.logfile = logfile
        self.starttime = time.time()
        output = ''
        self.process = subprocess.Popen(command, **self.options)
        if timeout:
            deadline = self.starttime + timeout
            eof = False
            while time.time() < deadline and not eof:
                if select.select([self.process.stdout], [], [], 5)[0] != []:
                    chunk = os.read(self.process.stdout.fileno(), 1024)
                    if not chunk:
                        self.process.stdout.close()
                        eof = True
                    else:
                        output += chunk
                        self.log(chunk)
                        # Fresh output arrived: extend the no-output deadline.
                        deadline = time.time() + timeout

            # Still no EOF: the child stalled for `timeout` seconds.
            # Terminate it, then kill it if it survives the grace period.
            if not eof:
                self.process.terminate()
                time.sleep(5)
                try:
                    self.process.kill()
                except OSError:
                    pass
                lastline = "\nProcess killed - no output for %d seconds. Total running time: %d seconds." % (timeout, time.time() - self.starttime)
                self.log(lastline)
                output += lastline
        else:
            output = self.process.communicate()[0]
            self.log(output.rstrip())

        self.status = self.process.wait()
        self.output = output.rstrip()
        return (self.status, self.output)


class SSHControl(object):
    """Run commands on / copy files to and from a target over ssh/scp."""

    def __init__(self, ip, logfile=None, timeout=300, user='root', port=None):
        self.ip = ip
        self.defaulttimeout = timeout
        self.ignore_status = True
        self.logfile = logfile
        self.user = user
        # Disable host-key checking: test targets are freshly provisioned
        # and would otherwise trip known_hosts mismatches.
        self.ssh_options = [
            '-o', 'UserKnownHostsFile=/dev/null',
            '-o', 'StrictHostKeyChecking=no',
            '-o', 'LogLevel=ERROR'
        ]
        self.ssh = ['ssh', '-l', self.user] + self.ssh_options
        self.scp = ['scp'] + self.ssh_options
        if port:
            # NOTE(review): port must be a string, as it is passed straight
            # into the argv list - confirm callers never pass an int.
            self.ssh = self.ssh + ['-p', port]
            self.scp = self.scp + ['-P', port]

    def log(self, msg):
        if self.logfile:
            with open(self.logfile, "a") as f:
                f.write("%s\n" % msg)

    def _internal_run(self, command, timeout=None, ignore_status = True):
        self.log("[Running]$ %s" % " ".join(command))

        sshproc = SSHProcess()
        status, output = sshproc.run(command, timeout, logfile=self.logfile)

        self.log("[Command returned '%d' after %.2f seconds]" % (status, time.time() - sshproc.starttime))

        if status and not ignore_status:
            raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, status, output))

        return (status, output)

    def run(self, command, timeout=None):
        """
        command - ssh command to run
        timeout=<seconds> - kill command if there is no output after <seconds>
        timeout=None - kill command if there is no output after a default value seconds
        timeout=0 - no timeout, let command run until it returns
        """
        # We need to source /etc/profile for a proper PATH on the target
        command = self.ssh + [self.ip, ' . /etc/profile; ' + command]

        if timeout is None:
            return self._internal_run(command, self.defaulttimeout, self.ignore_status)
        if timeout == 0:
            return self._internal_run(command, None, self.ignore_status)
        return self._internal_run(command, timeout, self.ignore_status)

    def copy_to(self, localpath, remotepath):
        command = self.scp + [localpath, '%s@%s:%s' % (self.user, self.ip, remotepath)]
        return self._internal_run(command, ignore_status=False)

    def copy_from(self, remotepath, localpath):
        command = self.scp + ['%s@%s:%s' % (self.user, self.ip, remotepath), localpath]
        return self._internal_run(command, ignore_status=False)
# meta/lib/oeqa/utils/targetbuild.py
#
# Copyright (C) 2013 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)

# Provides a class for automating build tests for projects

import os
import re
import bb.utils
import subprocess
from abc import ABCMeta, abstractmethod

class BuildProject():
    """Abstract base for download/configure/make/install build tests.

    Subclasses must implement _run() to execute a shell command in the
    appropriate environment and must set self.targetdir.
    """

    # NOTE(review): __metaclass__ is the Python 2 spelling; it is ignored
    # on Python 3 - fine for this (Python 2) codebase.
    __metaclass__ = ABCMeta

    def __init__(self, d, uri, foldername=None, tmpdir="/tmp/"):
        self.d = d
        self.uri = uri
        self.archive = os.path.basename(uri)
        self.localarchive = os.path.join(tmpdir, self.archive)
        # BUGFIX: the original pattern r'.tar.bz2|tar.gz$' had unescaped
        # dots, no anchor on the first alternative and a missing leading
        # dot on the second, so e.g. "foo.tar.gz" stripped to "foo." -
        # anchor and escape the suffix properly.
        self.fname = re.sub(r'\.tar\.(bz2|gz)$', '', self.archive)
        if foldername:
            self.fname = foldername

    # Download self.archive to self.localarchive
    def _download_archive(self):
        # Propagate the proxy environment into the wget invocation.
        exportvars = ['HTTP_PROXY', 'http_proxy',
                      'HTTPS_PROXY', 'https_proxy',
                      'FTP_PROXY', 'ftp_proxy',
                      'FTPS_PROXY', 'ftps_proxy',
                      'NO_PROXY', 'no_proxy',
                      'ALL_PROXY', 'all_proxy',
                      'SOCKS5_USER', 'SOCKS5_PASSWD']

        cmd = ''
        for var in exportvars:
            val = self.d.getVar(var, True)
            if val:
                cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)

        cmd = cmd + "wget -O %s %s" % (self.localarchive, self.uri)
        subprocess.check_call(cmd, shell=True)

    # This method should provide a way to run a command in the desired
    # environment.
    @abstractmethod
    def _run(self, cmd):
        pass

    def run_configure(self, configure_args=''):
        return self._run('cd %s; ./configure %s' % (self.targetdir, configure_args))

    def run_make(self, make_args=''):
        return self._run('cd %s; make %s' % (self.targetdir, make_args))

    def run_install(self, install_args=''):
        return self._run('cd %s; make install %s' % (self.targetdir, install_args))

    def clean(self):
        # Remove the build tree on the build host/target and the locally
        # downloaded archive.
        self._run('rm -rf %s' % self.targetdir)
        subprocess.call('rm -f %s' % self.localarchive, shell=True)


class TargetBuildProject(BuildProject):
    """Build test executed on the target device over ssh."""

    def __init__(self, target, d, uri, foldername=None):
        self.target = target
        self.targetdir = "~/"
        BuildProject.__init__(self, d, uri, foldername, tmpdir="/tmp")

    def download_archive(self):
        """Fetch the archive locally, copy it to the target and unpack it."""
        self._download_archive()

        (status, output) = self.target.copy_to(self.localarchive, self.targetdir)
        if status != 0:
            raise Exception("Failed to copy archive to target, output: %s" % output)

        # self.targetdir ends with '/', so plain concatenation below forms
        # a valid remote path.
        (status, output) = self.target.run('tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir))
        if status != 0:
            raise Exception("Failed to extract archive, output: %s" % output)

        # Change targetdir to the project folder
        self.targetdir = self.targetdir + self.fname

    # The timeout parameter of target.run is set to 0 to make the ssh command
    # run with no timeout.
+ def _run(self, cmd): + return self.target.run(cmd, 0)[0] + + +class SDKBuildProject(BuildProject): + + def __init__(self, testpath, sdkenv, d, uri, foldername=None): + self.sdkenv = sdkenv + self.testdir = testpath + self.targetdir = testpath + bb.utils.mkdirhier(testpath) + self.datetime = d.getVar('DATETIME', True) + self.testlogdir = d.getVar("TEST_LOG_DIR", True) + bb.utils.mkdirhier(self.testlogdir) + self.logfile = os.path.join(self.testlogdir, "sdk_target_log.%s" % self.datetime) + BuildProject.__init__(self, d, uri, foldername, tmpdir=testpath) + + def download_archive(self): + + self._download_archive() + + cmd = 'tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir) + subprocess.check_call(cmd, shell=True) + + #Change targetdir to project folder + self.targetdir = self.targetdir + self.fname + + def run_configure(self, configure_args=''): + return super(SDKBuildProject, self).run_configure(configure_args=(configure_args or '$CONFIGURE_FLAGS')) + + def run_install(self, install_args=''): + return super(SDKBuildProject, self).run_install(install_args=(install_args or "DESTDIR=%s/../install" % self.targetdir)) + + def log(self, msg): + if self.logfile: + with open(self.logfile, "a") as f: + f.write("%s\n" % msg) + + def _run(self, cmd): + self.log("Running source %s; " % self.sdkenv + cmd) + return subprocess.call("source %s; " % self.sdkenv + cmd, shell=True) + -- cgit v1.2.3-54-g00ecf