diff options
| author | Mark Hatle <mark.hatle@amd.com> | 2025-03-20 14:45:23 -0600 |
|---|---|---|
| committer | Mark Hatle <mark.hatle@amd.com> | 2025-03-30 14:16:15 -0600 |
| commit | aac160dbfa1e4efd6e8239bfbc1d7fe97364d87c (patch) | |
| tree | 9ae86099e835c740fb51baf2f8cd8b1f223840f3 /meta-xilinx-core/lib | |
| parent | 4a95d4cdccb17a5630787ed7e5097b3c3b42891a (diff) | |
| download | meta-xilinx-aac160dbfa1e4efd6e8239bfbc1d7fe97364d87c.tar.gz | |
meta-xilinx-core: Bring in a copy of wic from poky
Import a copy of wic and lib/oe as of poky commit:
8f74fa4073d4b2ba8e0d9559aa654f3cafcf373a
Signed-off-by: Mark Hatle <mark.hatle@amd.com>
Diffstat (limited to 'meta-xilinx-core/lib')
38 files changed, 11892 insertions, 0 deletions
diff --git a/meta-xilinx-core/lib/oe/__init__.py b/meta-xilinx-core/lib/oe/__init__.py new file mode 100644 index 00000000..6eb536ad --- /dev/null +++ b/meta-xilinx-core/lib/oe/__init__.py | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
# Make 'oe' behave as a namespace-style package so that multiple layers
# can each contribute modules under their own lib/oe directory.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)

# Submodules imported into the bitbake global namespace (consumed by
# base.bbclass via OE_IMPORTS machinery)
BBIMPORTS = ["data", "path", "utils", "types", "package", "packagedata", \
        "packagegroup", "sstatesig", "lsb", "cachedpath", "license", \
        "qa", "reproducible", "rust", "buildcfg", "go"]
diff --git a/meta-xilinx-core/lib/oe/buildcfg.py b/meta-xilinx-core/lib/oe/buildcfg.py new file mode 100644 index 00000000..27b059b8 --- /dev/null +++ b/meta-xilinx-core/lib/oe/buildcfg.py | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | |||
| 2 | import os | ||
| 3 | import subprocess | ||
| 4 | import bb.process | ||
| 5 | |||
def detect_revision(d):
    """Return the git revision of the core metadata tree for datastore *d*."""
    return get_metadata_git_revision(get_scmbasepath(d))
| 9 | |||
def detect_branch(d):
    """Return the git branch of the core metadata tree for datastore *d*."""
    return get_metadata_git_branch(get_scmbasepath(d))
| 13 | |||
def get_scmbasepath(d):
    """Return the path of the core 'meta' layer underneath COREBASE."""
    corebase = d.getVar('COREBASE')
    return os.path.join(corebase, 'meta')
| 16 | |||
def get_metadata_git_branch(path):
    """Return the abbreviated branch name of the git repo at *path*,
    or '<unknown>' if it cannot be determined."""
    try:
        branch, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
    except bb.process.ExecutionError:
        return '<unknown>'
    return branch.strip()
| 23 | |||
def get_metadata_git_revision(path):
    """Return the HEAD commit hash of the git repo at *path*,
    or '<unknown>' if it cannot be determined."""
    try:
        revision, _ = bb.process.run('git rev-parse HEAD', cwd=path)
    except bb.process.ExecutionError:
        return '<unknown>'
    return revision.strip()
| 30 | |||
def get_metadata_git_toplevel(path):
    """Return the top-level directory of the git repo containing *path*,
    or '' on error."""
    try:
        top, _ = bb.process.run('git rev-parse --show-toplevel', cwd=path)
    except bb.process.ExecutionError:
        return ""
    return top.strip()
| 37 | |||
def get_metadata_git_remotes(path):
    """Return the list of git remote names for the repo at *path*
    (empty list on error)."""
    try:
        output, _ = bb.process.run('git remote', cwd=path)
    except bb.process.ExecutionError:
        return []
    return output.split()
| 45 | |||
def get_metadata_git_remote_url(path, remote):
    """Return the fetch URL configured for *remote* in the git repo at
    *path*, or '' on error."""
    cmd = 'git remote get-url {remote}'.format(remote=remote)
    try:
        uri, _ = bb.process.run(cmd, cwd=path)
    except bb.process.ExecutionError:
        return ""
    return uri.strip()
| 52 | |||
def get_metadata_git_describe(path):
    """Return `git describe --tags` output for the repo at *path*,
    or '' on error."""
    try:
        described, _ = bb.process.run('git describe --tags', cwd=path)
    except bb.process.ExecutionError:
        return ""
    return described.strip()
| 59 | |||
def is_layer_modified(path):
    """Return ' -- modified' if the layer checkout at *path* has uncommitted
    git changes (or is not a usable git repository), or '' if it is clean.

    PSEUDO_UNLOAD is exported so git does not run under pseudo, which would
    give it a fake view of file ownership.
    """
    import shlex
    try:
        # Quote the path so directories containing spaces or shell
        # metacharacters don't break (or get injected into) the command line.
        subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
                                git diff --quiet --no-ext-diff
                                git diff --quiet --no-ext-diff --cached""" % shlex.quote(path),
                                shell=True,
                                stderr=subprocess.STDOUT)
        return ""
    except subprocess.CalledProcessError:
        # Silently treat errors as "modified", without checking for the
        # (expected) return code 1 in a modified git repo. For example, we get
        # output and a 129 return code when a layer isn't a git repo at all.
        return " -- modified"
| 73 | |||
def get_layer_revisions(d):
    """Return one (path, basename, branch, revision, modified-marker) tuple
    per layer listed in BBLAYERS."""
    revisions = []
    for layer in (d.getVar("BBLAYERS") or "").split():
        revisions.append((
            layer,
            os.path.basename(layer),
            get_metadata_git_branch(layer).strip(),
            get_metadata_git_revision(layer),
            is_layer_modified(layer),
        ))
    return revisions
diff --git a/meta-xilinx-core/lib/oe/buildhistory_analysis.py b/meta-xilinx-core/lib/oe/buildhistory_analysis.py new file mode 100644 index 00000000..4edad015 --- /dev/null +++ b/meta-xilinx-core/lib/oe/buildhistory_analysis.py | |||
| @@ -0,0 +1,723 @@ | |||
| 1 | # Report significant differences in the buildhistory repository since a specific revision | ||
| 2 | # | ||
| 3 | # Copyright (C) 2012-2013, 2016-2017 Intel Corporation | ||
| 4 | # Author: Paul Eggleton <paul.eggleton@linux.intel.com> | ||
| 5 | # | ||
| 6 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 7 | # | ||
| 8 | # Note: requires GitPython 0.3.1+ | ||
| 9 | # | ||
| 10 | # You can use this from the command line by running scripts/buildhistory-diff | ||
| 11 | # | ||
| 12 | |||
| 13 | import sys | ||
| 14 | import os.path | ||
| 15 | import difflib | ||
| 16 | import git | ||
| 17 | import re | ||
| 18 | import shlex | ||
| 19 | import hashlib | ||
| 20 | import collections | ||
| 21 | import bb.utils | ||
| 22 | import bb.tinfoil | ||
| 23 | |||
| 24 | |||
# How to display fields
# Fields whose values are unordered item lists (diffed as sets)
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
# Fields where a pure change of item order is also reported
list_order_fields = ['PACKAGES']
# Fields whose empty value falls back to another variable ('<value> [default]')
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
# Fields compared numerically and reported with a percentage change
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']

# ANSI escape sequences used when rendering output; empty strings mean
# colours are disabled (see init_colours)
colours = {
    'colour_default': '',
    'colour_add': '',
    'colour_remove': '',
}
| 43 | |||
def init_colours(use_colours):
    """Enable or disable ANSI colour codes in diff output (module-wide)."""
    global colours
    if use_colours:
        colours = {
            'colour_default': '\033[0m',
            'colour_add': '\033[1;32m',
            'colour_remove': '\033[1;31m',
        }
    else:
        colours = dict.fromkeys(
            ('colour_default', 'colour_add', 'colour_remove'), '')
| 58 | |||
class ChangeRecord:
    """One detected difference in a buildhistory field.

    str(record) renders a human-readable description; the format depends on
    the kind of field (list, numeric, script, image file list, or scalar).
    """

    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        self.path = path            # buildhistory repo path the change was found in
        self.fieldname = fieldname  # variable/file name, e.g. 'RDEPENDS'
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        self.monitored = monitored  # True if this field is in the monitored set
        self.filechanges = None     # optional list of FileChange objects, set by caller

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        # Only top-level rendering gets the "<path>: " prefix
        if outer:
            if '/image-files/' in self.path:
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            # Flatten {pkg: version} into ['pkg (version)' | 'pkg', ...]
            pkglist = []
            for k,v in depver.items():
                if v:
                    pkglist.append("%s (%s)" % (k,v))
                else:
                    pkglist.append(k)
            return pkglist

        def detect_renamed_dirs(aitems, bitems):
            # Detect directories whose whole (identical) set of file names
            # moved to a differently named directory; returns the renames
            # plus aitems/bitems with those files filtered out.
            adirs = set(map(os.path.dirname, aitems))
            bdirs = set(map(os.path.dirname, bitems))
            files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
                        for name in adirs - bdirs]
            files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
                        for name in bdirs - adirs]
            renamed_dirs = []
            for dir1, files1 in files_ab:
                rename = False
                for dir2, files2 in files_ba:
                    if files1 == files2 and not rename:
                        renamed_dirs.append((dir1,dir2))
                        # Make sure that we don't use this (dir, files) pair again.
                        files_ba.remove((dir2,files2))
                        # If a dir has already been found to have a rename, stop and go no further.
                        rename = True

            # remove files that belong to renamed dirs from aitems and bitems
            for dir1, dir2 in renamed_dirs:
                aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
                bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
            return renamed_dirs, aitems, bitems

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            renamed_dirs = []
            changed_order = False
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Dependency-style fields: versions stripped of pure increases
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                if self.fieldname == 'FILELIST':
                    # shlex.split so quoted file names with spaces survive
                    aitems = shlex.split(self.oldvalue)
                    bitems = shlex.split(self.newvalue)
                    renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
                else:
                    aitems = self.oldvalue.split()
                    bitems = self.newvalue.split()

            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Same items either side - check whether only the order changed
                depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
                depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
                for i, j in zip(depvera.items(), depverb.items()):
                    if i[0] != j[0]:
                        changed_order = True
                        break

            lines = []
            if renamed_dirs:
                for dfrom, dto in renamed_dirs:
                    lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
            if removed or added:
                if removed and not bitems:
                    lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
                else:
                    if removed:
                        lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
                    if added:
                        lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
            else:
                lines.append('changed order')

            if not (removed or added or changed_order):
                out = ''
            else:
                out = '%s: %s' % (self.fieldname, ', '.join(lines))

        elif self.fieldname in numeric_fields:
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                percentchg = 100
            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
        elif self.fieldname in defaultval_map:
            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            # Package scripts: show a unified diff of the script text
            # NOTE(review): the indentation inside these output strings was
            # whitespace-collapsed by the web scrape; confirm against upstream
            # poky buildhistory_analysis.py (two-space continuation indent).
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n  ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n  ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n  ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            # Drop the '---'/'+++' header pair from the diff output
            out += '\n  '.join(list(diff)[2:])
            out += '\n  --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot":
            if self.filechanges or (self.oldvalue and self.newvalue):
                fieldname = self.fieldname
                if '/image-files/' in self.path:
                    fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                    out = 'Changes to %s:\n  ' % fieldname
                else:
                    if outer:
                        prefix = 'Changes to %s ' % self.path
                    out = '(%s):\n  ' % self.fieldname
                if self.filechanges:
                    out += '\n  '.join(['%s' % i for i in self.filechanges])
                else:
                    alines = self.oldvalue.splitlines()
                    blines = self.newvalue.splitlines()
                    diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                    out += '\n  '.join(list(diff))
                    out += '\n  --'
            else:
                out = ''
        else:
            # Scalar fields: plain old -> new report
            out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)

        return '%s%s' % (prefix, out) if out else ''
| 208 | |||
class FileChange:
    """A single change to a file in an image or sysroot, tagged with a
    one-letter change type and rendered via str()."""

    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'
    changetype_move = 'M'

    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        # Map an `ls`-style type character to a human-readable name
        names = {
            '-': 'file',
            'd': 'directory',
            'l': 'symlink',
            'c': 'char device',
            'b': 'block device',
            'p': 'fifo',
            's': 'socket',
        }
        return names.get(ftype, 'unknown (%s)' % ftype)

    def __str__(self):
        # Dispatch on change type; fall through to a generic message for
        # unrecognised types
        renderers = {
            self.changetype_add: lambda: '%s was added' % self.path,
            self.changetype_remove: lambda: '%s was removed' % self.path,
            self.changetype_type: lambda: '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue)),
            self.changetype_perms: lambda: '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue),
            self.changetype_ownergroup: lambda: '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue),
            self.changetype_link: lambda: '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue),
            self.changetype_move: lambda: '%s moved to %s' % (self.path, self.oldvalue),
        }
        render = renderers.get(self.changetype)
        if render is None:
            return '%s changed (unknown)' % self.path
        return render()
| 259 | |||
def blob_to_dict(blob):
    """Parse a git blob of 'NAME = value' lines into a dict.

    Lines without an '=' are ignored; keys and values are stripped.
    """
    adict = {}
    text = blob.data_stream.read().decode('utf-8')
    for line in text.splitlines():
        key, sep, value = line.partition('=')
        if sep:
            adict[key.strip()] = value.strip()
    return adict
| 268 | |||
| 269 | |||
def file_list_to_dict(lines):
    """Parse `ls -l`-style listing lines into a dict keyed by path.

    Each value is the first three whitespace-separated fields of the line,
    with the symlink target appended for symlink entries.
    """
    adict = {}
    for line in lines:
        # Leave the last few fields intact so we handle file names containing spaces
        fields = line.split(None, 4)
        # Grab the path and remove the leading .
        path = fields[4][1:].strip()
        # Handle symlinks
        if ' -> ' in path:
            parts = path.split(' -> ')
            adict[parts[0]] = fields[0:3] + [parts[1]]
        else:
            adict[path] = fields[0:3]
    return adict
| 285 | |||
# Translation table mapping every decimal digit to 'X'; lets us compare
# paths while ignoring embedded version numbers
numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX')

def compare_file_lists(alines, blines, compare_ownership=True):
    """Compare two `ls -l`-style file listings and return FileChange objects.

    Detects type/permission/owner/symlink-target changes for paths present in
    both, collapses version-number-only renames into 'move' records, and
    reports the remainder as additions and removals.
    """
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    additions = []
    removals = []
    for path, splitv in adict.items():
        # pop() so that whatever remains in bdict afterwards is an addition
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))

            # Check permissions
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))

            if compare_ownership:
                # Check owner/group
                oldvalue = '%s/%s' % (splitv[1], splitv[2])
                newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))

            # Check symlink target
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            removals.append(path)

    # Whatever is left over has been added
    for path in bdict:
        additions.append(path)

    # Rather than print additions and removals, it's nicer to print file 'moves'
    # where names or paths are similar.
    # Map each removal's digit-masked form back to the real path(s)
    revmap_remove = {}
    for removal in removals:
        translated = removal.translate(numeric_removal)
        if translated not in revmap_remove:
            revmap_remove[translated] = []
        revmap_remove[translated].append(removal)

    #
    # We want to detect renames of large trees of files like
    # /lib/modules/5.4.40-yocto-standard to /lib/modules/5.4.43-yocto-standard
    #
    renames = {}
    for addition in additions.copy():
        # The list is mutated inside the loop, so skip entries already consumed
        if addition not in additions:
            continue
        translated = addition.translate(numeric_removal)
        if translated in revmap_remove:
            # Only pair up when the masked form is unambiguous
            if len(revmap_remove[translated]) != 1:
                continue
            removal = revmap_remove[translated][0]
            commondir = addition.split("/")
            commondir2 = removal.split("/")
            idx = None
            for i in range(len(commondir)):
                if commondir[i] != commondir2[i]:
                    idx = i
                    break
            # NOTE(review): the joins below use the loop variable `i` rather
            # than `idx` (which is otherwise unused); they are equal when the
            # break fired, but confirm the intent against upstream poky.
            commondir = "/".join(commondir[:i+1])
            commondir2 = "/".join(commondir2[:i+1])
            # If the common parent is in one dict and not the other its likely a rename
            # so iterate through those files and process as such
            if commondir2 not in bdict and commondir not in adict:
                if commondir not in renames:
                    renames[commondir] = commondir2
                for addition2 in additions.copy():
                    if addition2.startswith(commondir):
                        removal2 = addition2.replace(commondir, commondir2)
                        if removal2 in removals:
                            additions.remove(addition2)
                            removals.remove(removal2)
                continue
            # Single-file rename: emit a move and drop both sides
            filechanges.append(FileChange(removal, FileChange.changetype_move, addition))
            if addition in additions:
                additions.remove(addition)
            if removal in removals:
                removals.remove(removal)
    for rename in renames:
        filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename))

    for addition in additions:
        filechanges.append(FileChange(addition, FileChange.changetype_add))
    for removal in removals:
        filechanges.append(FileChange(removal, FileChange.changetype_remove))

    return filechanges
| 389 | |||
| 390 | |||
def compare_lists(alines, blines):
    """Return FileChange records for items removed from *alines* and items
    added in *blines* (set difference in each direction)."""
    aset = set(alines)
    bset = set(blines)
    filechanges = [FileChange(item, FileChange.changetype_remove)
                   for item in aset - bset]
    filechanges += [FileChange(item, FileChange.changetype_add)
                    for item in bset - aset]
    return filechanges
| 402 | |||
| 403 | |||
def compare_pkg_lists(astr, bstr):
    """Compare two dependency strings, dropping entries whose only difference
    is a version increase.

    Returns (depvera, depverb): the exploded dependency dicts with the
    uninteresting (version-bump-only) entries removed from both.
    """
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)

    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            # Same comparison operator: keep the entry only if
                            # the version went *down* (an increase is expected)
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                removeit = False
                                break
                        else:
                            removeit = False
                            break
                if removeit:
                    remove.append(k)

    for k in remove:
        depvera.pop(k)
        depverb.pop(k)

    return (depvera, depverb)
| 438 | |||
| 439 | |||
def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    """Compare two 'NAME = value' git blobs and return ChangeRecord objects.

    Unless report_all is set, filters out noise: small numeric changes below
    monitor_numeric_threshold, version-bump-only dependency changes,
    -dbg/-src FILELIST growth, removed self-dependencies, and PKGR bumps of
    exactly one.
    """
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)

    pkgname = os.path.basename(path)

    # Fallback values used when a defaultval_map field is unset on one side
    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = '0'

    changes = []
    # Union of keys, plus the defaulted fields so unset-vs-set is detected
    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if key in ver_monitor_fields:
            monitored = report_ver or astr or bstr
        else:
            monitored = key in monitor_fields
        mapped_key = defaultval_map.get(key, '')
        if mapped_key:
            # Substitute the mapped variable's value, flagged as '[default]'
            if not astr:
                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
            if not bstr:
                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))

        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                # Ignore numeric changes below the alert threshold
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                # Growth of -dbg/-src file lists is expected; skip it
                if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                if key == 'FILELIST':
                    alist = shlex.split(astr)
                    blist = shlex.split(bstr)
                else:
                    alist = astr.split()
                    blist = bstr.split()
                alist.sort()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                if ' '.join(alist) == ' '.join(blist):
                    continue

            if key == 'PKGR' and not report_all:
                vers = []
                # strip leading 'r' and dots
                for ver in (astr.split()[0], bstr.split()[0]):
                    if ver.startswith('r'):
                        ver = ver[1:]
                    vers.append(ver.replace('.', ''))
                maxlen = max(len(vers[0]), len(vers[1]))
                try:
                    # pad with '0' and convert to int
                    vers = [int(ver.ljust(maxlen, '0')) for ver in vers]
                except ValueError:
                    pass
                else:
                    # skip decrements and increments
                    if abs(vers[0] - vers[1]) == 1:
                        continue

            chg = ChangeRecord(path, key, astr, bstr, monitored)
            changes.append(chg)
    return changes
| 518 | |||
| 519 | |||
def compare_siglists(a_blob, b_blob, taskdiff=False):
    """Compare two siglist.txt blobs and return a multi-line report string.

    With taskdiff=False, reports which task signatures changed/appeared/
    disappeared. With taskdiff=True, additionally uses tinfoil + bb.siggen
    to explain *why* each changed signature differs, deduplicating identical
    explanations across tasks.
    """
    # FIXME collapse down a recipe's tasks?
    alines = a_blob.data_stream.read().decode('utf-8').splitlines()
    blines = b_blob.data_stream.read().decode('utf-8').splitlines()
    # keys/pnmap are shared state mutated by readsigs() for both sides
    keys = []
    pnmap = {}
    def readsigs(lines):
        # Each siglist line is expected as: <pn.task> <pn> <sighash> ...
        sigs = {}
        for line in lines:
            linesplit = line.split()
            if len(linesplit) > 2:
                sigs[linesplit[0]] = linesplit[2]
                if not linesplit[0] in keys:
                    keys.append(linesplit[0])
                pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
        return sigs
    adict = readsigs(alines)
    bdict = readsigs(blines)
    out = []

    changecount = 0
    addcount = 0
    removecount = 0
    if taskdiff:
        with bb.tinfoil.Tinfoil() as tinfoil:
            tinfoil.prepare(config_only=True)

            # hash-of-explanation -> (explanation line, [task descriptions])
            changes = collections.OrderedDict()

            def compare_hashfiles(pn, taskname, hash1, hash2):
                hashes = [hash1, hash2]
                hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)

                if not taskname:
                    # Recursive call (from recursecb): key is 'pn.taskname'
                    (pn, taskname) = pn.rsplit('.', 1)
                    pn = pnmap.get(pn, pn)
                desc = '%s.%s' % (pn, taskname)

                if len(hashfiles) == 0:
                    out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
                elif not hash1 in hashfiles:
                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
                elif not hash2 in hashfiles:
                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
                else:
                    out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, collapsed=True)
                    for line in out2:
                        # Deduplicate identical explanations by sha256 of the text
                        m = hashlib.sha256()
                        m.update(line.encode('utf-8'))
                        entry = changes.get(m.hexdigest(), (line, []))
                        if desc not in entry[1]:
                            changes[m.hexdigest()] = (line, entry[1] + [desc])

            # Define recursion callback
            def recursecb(key, hash1, hash2):
                compare_hashfiles(key, None, hash1, hash2)
                return []

            for key in keys:
                siga = adict.get(key, None)
                sigb = bdict.get(key, None)
                if siga is not None and sigb is not None and siga != sigb:
                    changecount += 1
                    (pn, taskname) = key.rsplit('.', 1)
                    compare_hashfiles(pn, taskname, siga, sigb)
                elif siga is None:
                    addcount += 1
                elif sigb is None:
                    removecount += 1
            for key, item in changes.items():
                line, tasks = item
                if len(tasks) == 1:
                    desc = tasks[0]
                elif len(tasks) == 2:
                    desc = '%s and %s' % (tasks[0], tasks[1])
                else:
                    desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
                out.append('%s: %s' % (desc, line))
    else:
        for key in keys:
            siga = adict.get(key, None)
            sigb = bdict.get(key, None)
            if siga is not None and sigb is not None and siga != sigb:
                out.append('%s changed from %s to %s' % (key, siga, sigb))
                changecount += 1
            elif siga is None:
                out.append('%s was added' % key)
                addcount += 1
            elif sigb is None:
                out.append('%s was removed' % key)
                removecount += 1
    # NOTE(review): raises ZeroDivisionError if bdict is empty - presumably
    # siglist.txt is never empty in practice; confirm before hardening.
    out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
    return '\n'.join(out)
| 613 | |||
| 614 | |||
def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
        sigs=False, sigsdiff=False, exclude_path=None):
    """Compare two revisions of a buildhistory git repository and return a
    list of change objects describing what differs between them.

    repopath: path to the buildhistory git repository.
    revision1: base git revision to compare from.
    revision2: git revision to compare to (defaults to 'HEAD').
    report_all: when True return every change; otherwise only changes whose
        record is flagged as monitored.
    report_ver: passed through to compare_dict_blobs() to control version
        field reporting.
    sigs/sigsdiff: when either is set, only 'siglist.txt' is compared (task
        signature lists) and the function returns early with that result;
        sigsdiff selects the per-task diff output of compare_siglists().
    exclude_path: optional iterable of path prefixes; file-level changes
        under any of these prefixes are filtered out of the result.
    """
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []

    # Signature-only mode: compare the signature lists and return immediately.
    if sigs or sigsdiff:
        for d in diff.iter_change_type('M'):
            if d.a_blob.path == 'siglist.txt':
                changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
        return changes

    # Modified files: dispatch on where the file lives in the buildhistory
    # tree (packages/ vs images/) and on the specific filename.
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                # latest.<scriptname> holds pkg preinst/postinst/prerm/postrm contents
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)
            elif filename == 'sysroot':
                alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                # Ownership is not meaningful for sysroot contents comparison
                filechanges = compare_file_lists(alines,blines, compare_ownership=False)
                if filechanges:
                    chg = ChangeRecord(path, filename, None, None, True)
                    chg.filechanges = filechanges
                    changes.append(chg)

        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                # filename[7:] strips the 'latest.' prefix, leaving the script name
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read().decode('utf-8'), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
                changes.append(chg)

    # filter out unwanted paths
    if exclude_path:
        for chg in changes:
            if chg.filechanges:
                fchgs = []
                for fchg in chg.filechanges:
                    # for..else: keep the file change only if no exclude
                    # prefix matched it
                    for epath in exclude_path:
                        if fchg.path.startswith(epath):
                            break
                    else:
                        fchgs.append(fchg)
                chg.filechanges = fchgs

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]
diff --git a/meta-xilinx-core/lib/oe/cachedpath.py b/meta-xilinx-core/lib/oe/cachedpath.py new file mode 100644 index 00000000..0138b791 --- /dev/null +++ b/meta-xilinx-core/lib/oe/cachedpath.py | |||
| @@ -0,0 +1,237 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | # Based on standard python library functions but avoid | ||
| 7 | # repeated stat calls. Its assumed the files will not change from under us | ||
| 8 | # so we can cache stat calls. | ||
| 9 | # | ||
| 10 | |||
| 11 | import os | ||
| 12 | import errno | ||
| 13 | import stat as statmod | ||
| 14 | |||
class CachedPath(object):
    """Drop-in replacements for os.path/os stat-based queries which cache
    stat(), lstat() and normpath() results.

    The cache assumes the filesystem does not change underneath us; call
    updatecache() to invalidate the entries for a path that has changed.
    Failed stats are cached as False so repeated queries on missing paths
    are cheap as well.
    """

    def __init__(self):
        # path (normalized) -> os.stat_result, or False when the path
        # could not be stat'd
        self.statcache = {}
        self.lstatcache = {}
        # raw path -> normalized path
        self.normpathcache = {}

    def updatecache(self, x):
        """Invalidate any cached stat/lstat results for path x."""
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        """Cached equivalent of os.path.normpath()."""
        if path in self.normpathcache:
            return self.normpathcache[path]
        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        # Cached os.stat(); caches False on failure. Expects an already
        # normalized path (callers go through callstat()/calllstat()).
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more optimal
    # in real world usage of this cache
    def callstat(self, path):
        """Cached os.stat(); returns False if the path cannot be stat'd."""
        path = self.normpath(path)
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        """Cached os.lstat(); returns False if the path cannot be lstat'd."""
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                # Not a symlink, so stat() would give the same answer
                self.statcache[path] = lst
            else:
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isfile() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists.  Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists.  Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        """Cached os.stat(); returns False instead of raising on failure."""
        return self.callstat(path)

    def lstat(self, path):
        """Cached os.lstat(); returns False instead of raising on failure."""
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Cached generator equivalent of os.walk() (not os.path.walk())."""
        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains.  os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit.  That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return

        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        # 'root' is expected to carry a trailing separator (see realpath())
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True

        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)

            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)

            assert(self.__is_path_below(start, root))

        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        # Resolve 'file' itself through any chain of symlinks, bounded by
        # loop_cnt to guard against symlink cycles.
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                raise OSError(errno.ELOOP, file)

            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))

            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                # absolute symlink targets are reinterpreted relative to root
                tdir = root

            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)

        try:
            is_dir = self.isdir(file)
        except OSError:
            # isdir() normally returns False instead of raising (failed
            # stats are cached as False); be defensive anyway, but don't
            # swallow unrelated exceptions the way a bare except would
            is_dir = False

        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""

        root = os.path.normpath(root)
        file = os.path.normpath(file)

        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep

        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

        try:
            if use_physdir:
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it, there will
                # be printed a backtrace with 100s of OSError exceptions
                # else
                raise OSError(errno.ELOOP,
                              "too much recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))

            raise

        return file
diff --git a/meta-xilinx-core/lib/oe/classextend.py b/meta-xilinx-core/lib/oe/classextend.py new file mode 100644 index 00000000..5161d33d --- /dev/null +++ b/meta-xilinx-core/lib/oe/classextend.py | |||
| @@ -0,0 +1,159 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import collections | ||
| 8 | |||
def get_packages(d):
    """Expand PACKAGES for an extended (e.g. multilib) recipe.

    Reads the preserved original list from PACKAGES_NONML and asks the
    ClassExtender instance stored in EXTENDERCLASS to rename each entry.
    """
    extender = d.getVar("EXTENDERCLASS")
    return extender.rename_packages_internal(d.getVar("PACKAGES_NONML"))
| 13 | |||
def get_depends(varprefix, d):
    """Expand a dependency variable (e.g. RDEPENDS:<pkg>) for an extended recipe.

    Delegates to the ClassExtender stored in EXTENDERCLASS, mapping the
    preserved '<varprefix>_NONML' variable into the extended namespace.
    """
    extender = d.getVar("EXTENDERCLASS")
    return extender.map_depends_variable(varprefix + "_NONML")
| 17 | |||
class ClassExtender(object):
    """Renames recipe variables and packages into an extended namespace
    (e.g. multilib), prefixing names with 'extname' and rewriting the
    dependency/package variables in the datastore accordingly.
    """

    def __init__(self, extname, d):
        # extname: the namespace prefix (e.g. 'lib32'); d: recipe datastore.
        # Registers itself in EXTENDERCLASS so the module-level helpers
        # (get_packages/get_depends) can call back into this instance.
        self.extname = extname
        self.d = d
        self.pkgs_mapping = []
        self.d.setVar("EXTENDERCLASS", self)

    def extend_name(self, name):
        """Return 'name' mapped into the extended namespace, or unchanged
        when it belongs to a category that must not be renamed (kernel
        items, rtld*, -crosssdk suffixed names, absolute paths, and
        unexpanded ${...} references)."""
        if name.startswith("kernel-") or name == "virtual/kernel":
            return name
        if name.startswith("rtld"):
            return name
        if name.endswith("-crosssdk"):
            return name
        if name.endswith("-" + self.extname):
            # strip an already-present suffix form before re-prefixing
            name = name.replace("-" + self.extname, "")
        if name.startswith("virtual/"):
            # Assume large numbers of dashes means a triplet is present and we don't need to convert
            if name.count("-") >= 3 and name.endswith(("-go", "-binutils", "-gcc", "-g++")):
                return name
            subs = name.split("/", 1)[1]
            if not subs.startswith(self.extname):
                return "virtual/" + self.extname + "-" + subs
            return name
        if name.startswith("/") or (name.startswith("${") and name.endswith("}")):
            return name
        if not name.startswith(self.extname):
            return self.extname + "-" + name
        return name

    def map_variable(self, varname, setvar = True):
        """Apply extend_name() to each space-separated entry of 'varname';
        writes the result back to the datastore unless setvar is False.
        Returns the new value (empty string when the variable is unset)."""
        var = self.d.getVar(varname)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_regexp_variable(self, varname, setvar = True):
        """Like map_variable() but entries may be anchored regular
        expressions: a leading '^' is preserved and the prefix inserted
        after it."""
        var = self.d.getVar(varname)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            if v.startswith("^" + self.extname):
                newvar.append(v)
            elif v.startswith("^"):
                newvar.append("^" + self.extname + "-" + v[1:])
            else:
                newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_depends(self, dep):
        """Map a single dependency name, leaving native/SDK/cross items and
        dependencies already carrying a multilib prefix untouched."""
        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            # Do not extend for that already have multilib prefix
            var = self.d.getVar("MULTILIB_VARIANTS")
            if var:
                var = var.split()
                for v in var:
                    if dep.startswith(v):
                        return dep
            return self.extend_name(dep)

    def map_depends_variable(self, varname, suffix = ""):
        """Rewrite a dependency variable (optionally per-package via
        'suffix') through map_depends(), preserving version constraints.
        Also renames the original variable to <varname>_NONML and points
        <varname> at get_depends() so later expansion stays correct.
        Returns the mapped string, or None when the variable is unset."""
        # We need to preserve EXTENDPKGV so it can be expanded correctly later
        if suffix:
            varname = varname + ":" + suffix
        orig = self.d.getVar("EXTENDPKGV", False)
        self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
        deps = self.d.getVar(varname)
        if not deps:
            self.d.setVar("EXTENDPKGV", orig)
            return
        deps = bb.utils.explode_dep_versions2(deps)
        newdeps = collections.OrderedDict()
        for dep in deps:
            newdeps[self.map_depends(dep)] = deps[dep]

        if not varname.endswith("_NONML"):
            self.d.renameVar(varname, varname + "_NONML")
            self.d.setVar(varname, "${@oe.classextend.get_depends('%s', d)}" % varname)
            self.d.appendVarFlag(varname, "vardeps", " " + varname + "_NONML")
        ret = bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")
        self.d.setVar("EXTENDPKGV", orig)
        return ret

    def map_packagevars(self):
        """Map all per-package dependency variables (plus the recipe-wide
        '' entry) through map_depends_variable()."""
        for pkg in (self.d.getVar("PACKAGES").split() + [""]):
            self.map_depends_variable("RDEPENDS", pkg)
            self.map_depends_variable("RRECOMMENDS", pkg)
            self.map_depends_variable("RSUGGESTS", pkg)
            self.map_depends_variable("RPROVIDES", pkg)
            self.map_depends_variable("RREPLACES", pkg)
            self.map_depends_variable("RCONFLICTS", pkg)
            self.map_depends_variable("PKG", pkg)

    def rename_packages(self):
        """Build pkgs_mapping (old name -> extended name) for PACKAGES and
        defer the actual expansion to get_packages() via PACKAGES_NONML."""
        for pkg in (self.d.getVar("PACKAGES") or "").split():
            if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])

        self.d.renameVar("PACKAGES", "PACKAGES_NONML")
        self.d.setVar("PACKAGES", "${@oe.classextend.get_packages(d)}")

    def rename_packages_internal(self, pkgs):
        """Recompute pkgs_mapping from an expanded package list and return
        the space-joined extended names (called from get_packages())."""
        self.pkgs_mapping = []
        for pkg in (self.d.expand(pkgs) or "").split():
            if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])

        return " ".join([row[1] for row in self.pkgs_mapping])

    def rename_package_variables(self, variables):
        """Rename each '<var>:<oldpkg>' datastore variable to
        '<var>:<newpkg>' per pkgs_mapping; unexpanded ${...} package names
        are skipped."""
        for pkg_mapping in self.pkgs_mapping:
            if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
                continue
            for subs in variables:
                self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
| 151 | |||
class NativesdkClassExtender(ClassExtender):
    """ClassExtender variant for the nativesdk namespace."""

    def map_depends(self, dep):
        """Map a dependency into the nativesdk namespace, passing through
        anything that is already extended or is a native/cross/SDK item."""
        keep_as_is = (
            dep.startswith(self.extname)
            or dep.endswith(("-native", "-native-runtime"))
            or 'nativesdk-' in dep
            or '-cross-' in dep
            or '-crosssdk-' in dep
        )
        if keep_as_is:
            return dep
        return self.extend_name(dep)
diff --git a/meta-xilinx-core/lib/oe/classutils.py b/meta-xilinx-core/lib/oe/classutils.py new file mode 100644 index 00000000..ec3f6ad7 --- /dev/null +++ b/meta-xilinx-core/lib/oe/classutils.py | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
class ClassRegistryMeta(type):
    """Give each ClassRegistry their own registry"""
    def __init__(cls, name, bases, attrs):
        super().__init__(name, bases, attrs)
        # A fresh dict per class created with this metaclass, so separate
        # registries never share entries.
        cls.registry = {}
| 12 | |||
class ClassRegistry(type, metaclass=ClassRegistryMeta):
    """Maintain a registry of classes, indexed by name.

    Note that this implementation requires that the names be unique, as it uses
    a dictionary to hold the classes by name.

    The name in the registry can be overridden via the 'name' attribute of the
    class, and the 'priority' attribute controls priority. The prioritized()
    method returns the registered classes in priority order.

    Subclasses of ClassRegistry may define an 'implemented' property to exert
    control over whether the class will be added to the registry (e.g. to keep
    abstract base classes out of the registry)."""
    priority = 0
    def __init__(cls, name, bases, attrs):
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        # Classes that declare implemented = False stay out of the registry
        try:
            if not cls.implemented:
                return
        except AttributeError:
            pass

        # Default the registry key to the class name unless overridden
        try:
            cls.name
        except AttributeError:
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        """Return registered classes sorted by (priority, name), highest first."""
        return sorted(list(tcls.registry.values()),
                      key=lambda v: (v.priority, v.name), reverse=True)

    def unregister(cls):
        """Remove this class from the registry (under any key it was stored)."""
        # Iterate over a snapshot of the keys: deleting from the dict while
        # iterating its live keys() view raises RuntimeError in Python 3.
        for key in list(cls.registry.keys()):
            if cls.registry[key] is cls:
                del cls.registry[key]
diff --git a/meta-xilinx-core/lib/oe/copy_buildsystem.py b/meta-xilinx-core/lib/oe/copy_buildsystem.py new file mode 100644 index 00000000..81abfbf9 --- /dev/null +++ b/meta-xilinx-core/lib/oe/copy_buildsystem.py | |||
| @@ -0,0 +1,293 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | # This class should provide easy access to the different aspects of the | ||
| 7 | # buildsystem such as layers, bitbake location, etc. | ||
| 8 | # | ||
| 9 | # SDK_LAYERS_EXCLUDE: Layers which will be excluded from SDK layers. | ||
| 10 | # SDK_LAYERS_EXCLUDE_PATTERN: The simiar to SDK_LAYERS_EXCLUDE, this supports | ||
| 11 | # python regular expression, use space as separator, | ||
| 12 | # e.g.: ".*-downloads closed-.*" | ||
| 13 | # | ||
| 14 | |||
| 15 | import stat | ||
| 16 | import shutil | ||
| 17 | |||
| 18 | def _smart_copy(src, dest): | ||
| 19 | import subprocess | ||
| 20 | # smart_copy will choose the correct function depending on whether the | ||
| 21 | # source is a file or a directory. | ||
| 22 | mode = os.stat(src).st_mode | ||
| 23 | if stat.S_ISDIR(mode): | ||
| 24 | bb.utils.mkdirhier(dest) | ||
| 25 | cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -cf - -C %s -p . \ | ||
| 26 | | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest) | ||
| 27 | subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) | ||
| 28 | else: | ||
| 29 | shutil.copyfile(src, dest) | ||
| 30 | shutil.copymode(src, dest) | ||
| 31 | |||
class BuildSystem(object):
    """Provides access to the different aspects of the build system (layers,
    bitbake location, etc.) and can copy them into a standalone tree, e.g.
    for constructing an extensible SDK.

    context: short description of the consumer (used in log messages only).
    d: the datastore; BBLAYERS, SDK_LAYERS_EXCLUDE and
       SDK_LAYERS_EXCLUDE_PATTERN are read from it.
    """

    def __init__(self, context, d):
        self.d = d
        self.context = context
        self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
        # Layers excluded by exact path (SDK_LAYERS_EXCLUDE) or by regex
        # (SDK_LAYERS_EXCLUDE_PATTERN, space-separated patterns)
        self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
        self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')

    def copy_bitbake_and_layers(self, destdir, workspace_name=None):
        """Copy all configured metadata layers plus bitbake into 'destdir'.

        workspace_name: if set, a devtool workspace layer is included and
            renamed to this (uniquified with a numeric suffix if it clashes
            with an existing layer name); if unset, workspace layers are
            excluded entirely.

        Returns (copied_corebase, layers_copied), both as paths relative to
        destdir; meta-skeleton is copied but not reported.
        """
        import re
        # Copy in all metadata layers + bitbake (as repositories)
        copied_corebase = None
        layers_copied = []
        bb.utils.mkdirhier(destdir)
        layers = list(self.layerdirs)

        corebase = os.path.abspath(self.d.getVar('COREBASE'))
        layers.append(corebase)
        # The bitbake build system uses the meta-skeleton layer as a layout
        # for common recipies, e.g: the recipetool script to create kernel recipies
        # Add the meta-skeleton layer to be included as part of the eSDK installation
        layers.append(os.path.join(corebase, 'meta-skeleton'))

        # Exclude layers
        for layer_exclude in self.layers_exclude:
            if layer_exclude in layers:
                bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
                layers.remove(layer_exclude)

        if self.layers_exclude_pattern:
            # Iterate over a copy since matching layers are removed in-place
            layers_cp = layers[:]
            for pattern in self.layers_exclude_pattern.split():
                for layer in layers_cp:
                    if re.match(pattern, layer):
                        bb.note('Excluded %s from sdk layers since matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
                        layers.remove(layer)

        # Pick a workspace layer name that does not collide with any other
        # layer's basename
        workspace_newname = workspace_name
        if workspace_newname:
            layernames = [os.path.basename(layer) for layer in layers]
            extranum = 0
            while workspace_newname in layernames:
                extranum += 1
                workspace_newname = '%s-%d' % (workspace_name, extranum)

        corebase_files = self.d.getVar('COREBASE_FILES').split()
        corebase_files = [corebase + '/' +x for x in corebase_files]
        # Make sure bitbake goes in
        bitbake_dir = bb.__file__.rsplit('/', 3)[0]
        corebase_files.append(bitbake_dir)

        for layer in layers:
            layerconf = os.path.join(layer, 'conf', 'layer.conf')
            layernewname = os.path.basename(layer)
            workspace = False
            if os.path.exists(layerconf):
                # Detect devtool workspace layers by their generated marker line
                with open(layerconf, 'r') as f:
                    if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
                        if workspace_newname:
                            layernewname = workspace_newname
                            workspace = True
                        else:
                            bb.plain("NOTE: Excluding local workspace layer %s from %s" % (layer, self.context))
                            continue

            # If the layer was already under corebase, leave it there
            # since layers such as meta have issues when moved.
            layerdestpath = destdir
            if corebase == os.path.dirname(layer):
                layerdestpath += '/' + os.path.basename(corebase)
            # If the layer is located somewhere under the same parent directory
            # as corebase we keep the layer structure.
            elif os.path.commonpath([layer, corebase]) == os.path.dirname(corebase):
                layer_relative = os.path.relpath(layer, os.path.dirname(corebase))
                if os.path.dirname(layer_relative) != layernewname:
                    layerdestpath += '/' + os.path.dirname(layer_relative)

            layerdestpath += '/' + layernewname

            layer_relative = os.path.relpath(layerdestpath,
                                             destdir)
            # Treat corebase as special since it typically will contain
            # build directories or other custom items.
            if corebase == layer:
                copied_corebase = layer_relative
                bb.utils.mkdirhier(layerdestpath)
                # Only copy the whitelisted COREBASE_FILES (plus bitbake),
                # not the whole corebase tree
                for f in corebase_files:
                    f_basename = os.path.basename(f)
                    destname = os.path.join(layerdestpath, f_basename)
                    _smart_copy(f, destname)
            else:
                layers_copied.append(layer_relative)

                if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
                    bb.note("Skipping layer %s, already handled" % layer)
                else:
                    _smart_copy(layer, layerdestpath)

                if workspace:
                    # Make some adjustments original workspace layer
                    # Drop sources (recipe tasks will be locked, so we don't need them)
                    srcdir = os.path.join(layerdestpath, 'sources')
                    if os.path.isdir(srcdir):
                        shutil.rmtree(srcdir)
                    # Drop all bbappends except the one for the image the SDK is being built for
                    # (because of externalsrc, the workspace bbappends will interfere with the
                    # locked signatures if present, and we don't need them anyway)
                    image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
                    appenddir = os.path.join(layerdestpath, 'appends')
                    if os.path.isdir(appenddir):
                        for fn in os.listdir(appenddir):
                            if fn == image_bbappend:
                                continue
                            else:
                                os.remove(os.path.join(appenddir, fn))
                    # Drop README
                    readme = os.path.join(layerdestpath, 'README')
                    if os.path.exists(readme):
                        os.remove(readme)
                    # Filter out comments in layer.conf and change layer name
                    layerconf = os.path.join(layerdestpath, 'conf', 'layer.conf')
                    with open(layerconf, 'r') as f:
                        origlines = f.readlines()
                    with open(layerconf, 'w') as f:
                        for line in origlines:
                            if line.startswith('#'):
                                continue
                            line = line.replace('workspacelayer', workspace_newname)
                            f.write(line)

        # meta-skeleton layer is added as part of the build system
        # but not as a layer included in the build, therefore it is
        # not reported to the function caller.
        for layer in layers_copied:
            if layer.endswith('/meta-skeleton'):
                layers_copied.remove(layer)
                break

        return copied_corebase, layers_copied
| 171 | |||
def generate_locked_sigs(sigfile, d):
    """Dump locked task signatures for the current task graph to 'sigfile'.

    The task list is derived from BB_TASKDEPDATA, where each dependency
    entry carries the recipe filename at index 2 and the task name at
    index 1.
    """
    bb.utils.mkdirhier(os.path.dirname(sigfile))
    taskdepdata = d.getVar('BB_TASKDEPDATA', False)
    tasks = []
    for dep in taskdepdata.values():
        tasks.append('%s:%s' % (dep[2], dep[1]))
    bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
| 177 | |||
def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
    """Copy a locked-signatures file, dropping unwanted entries.

    excluded_tasks: task names whose entries are removed.
    excluded_targets: recipe/target names whose entries are removed.
    lockedsigs: input locked-signatures file path.
    onlynative: when True, additionally keep only 'nativesdk' entries.
    pruned_output: output file path (parent directories are created).

    Signature entries are continuation lines of the form
    'target:task:hash \\' inside SIGGEN_LOCKEDSIGS* assignments; all other
    lines are passed through unchanged.
    """
    with open(lockedsigs, 'r') as infile:
        bb.utils.mkdirhier(os.path.dirname(pruned_output))
        with open(pruned_output, 'w') as f:
            # invalue is True while we are inside a SIGGEN_LOCKEDSIGS
            # multi-line assignment
            invalue = False
            for line in infile:
                if invalue:
                    if line.endswith('\\\n'):
                        # Continuation line: one 'target:task:hash' entry
                        splitval = line.strip().split(':')
                        if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
                            if onlynative:
                                if 'nativesdk' in splitval[0]:
                                    f.write(line)
                            else:
                                f.write(line)
                    else:
                        # Closing line of the assignment: always kept
                        f.write(line)
                        invalue = False
                elif line.startswith('SIGGEN_LOCKEDSIGS'):
                    invalue = True
                    f.write(line)
| 199 | |||
def merge_lockedsigs(copy_tasks, lockedsigs_main, lockedsigs_extra, merged_output, copy_output=None):
    """
    Merge two locked-signature files.

    Entries from lockedsigs_extra that are not already present in
    lockedsigs_main are appended to the merged result (written to
    merged_output if set).  Entries whose task is in copy_tasks (or all
    new entries if copy_tasks is empty) are additionally written to
    copy_output if set.
    """
    # merged maps "t-<arch>" type name -> list of raw signature lines;
    # arch_order preserves first-seen ordering of the type names
    merged = {}
    arch_order = []
    with open(lockedsigs_main, 'r') as f:
        invalue = None
        for line in f:
            if invalue:
                if line.endswith('\\\n'):
                    merged[invalue].append(line)
                else:
                    # Line without continuation ends the current variable
                    invalue = None
            elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
                # Variable name minus the "SIGGEN_LOCKEDSIGS_" prefix,
                # e.g. "t-core2-64"
                invalue = line[18:].split('=', 1)[0].rstrip()
                merged[invalue] = []
                arch_order.append(invalue)

    with open(lockedsigs_extra, 'r') as f:
        invalue = None
        tocopy = {}
        for line in f:
            if invalue:
                if line.endswith('\\\n'):
                    if not line in merged[invalue]:
                        # New entry not present in the main file
                        target, task = line.strip().split(':')[:2]
                        if not copy_tasks or task in copy_tasks:
                            tocopy[invalue].append(line)
                        merged[invalue].append(line)
                else:
                    invalue = None
            elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
                invalue = line[18:].split('=', 1)[0].rstrip()
                if not invalue in merged:
                    merged[invalue] = []
                    arch_order.append(invalue)
                tocopy[invalue] = []

    def write_sigs_file(fn, types, sigs):
        # Write a lockedsigs-format file containing the given type names
        # (skipping empty ones) and a trailing SIGGEN_LOCKEDSIGS_TYPES line
        fulltypes = []
        bb.utils.mkdirhier(os.path.dirname(fn))
        with open(fn, 'w') as f:
            for typename in types:
                lines = sigs[typename]
                if lines:
                    f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % typename)
                    for line in lines:
                        f.write(line)
                    f.write('    "\n')
                    fulltypes.append(typename)
            f.write('SIGGEN_LOCKEDSIGS_TYPES = "%s"\n' % ' '.join(fulltypes))

    if copy_output:
        write_sigs_file(copy_output, list(tocopy.keys()), tocopy)
    if merged_output:
        write_sigs_file(merged_output, arch_order, merged)
| 254 | |||
def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cache, d, fixedlsbstring="", filterfile=None):
    """
    Populate output_sstate_cache with the sstate objects from
    input_sstate_cache that are referenced by the lockedsigs file,
    using the gen-lockedsig-cache helper script.

    If fixedlsbstring differs from NATIVELSBSTRING, the native sstate
    subdirectory is renamed/merged to the fixed string.
    """
    import shutil
    bb.note('Generating sstate-cache...')

    nativelsbstring = d.getVar('NATIVELSBSTRING')
    # PYTHONDONTWRITEBYTECODE avoids littering the cache with .pyc files
    bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
    if fixedlsbstring and nativelsbstring != fixedlsbstring:
        nativedir = output_sstate_cache + '/' + nativelsbstring
        if os.path.isdir(nativedir):
            destdir = os.path.join(output_sstate_cache, fixedlsbstring)
            # Move file-by-file so an existing destination tree is merged
            for root, _, files in os.walk(nativedir):
                for fn in files:
                    src = os.path.join(root, fn)
                    dest = os.path.join(destdir, os.path.relpath(src, nativedir))
                    if os.path.exists(dest):
                        # Already exists, and it'll be the same file, so just delete it
                        os.unlink(src)
                    else:
                        bb.utils.mkdirhier(os.path.dirname(dest))
                        shutil.move(src, dest)
| 275 | |||
def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, logfile=None):
    """
    Run oe-check-sstate for the given targets, writing the list of tasks
    that would be executed (i.e. are missing from sstate) to filteroutfile.

    The command runs in a cleaned copy of the original environment with
    BUILDDIR/BBPATH dropped and bitbake/bin removed from PATH so the
    nested bitbake invocation sets itself up from scratch.
    """
    import subprocess

    bb.note('Generating sstate task list...')

    if not cwd:
        cwd = os.getcwd()
    if logfile:
        logparam = '-l %s' % logfile
    else:
        logparam = ''
    # BB_SETSCENE_ENFORCE makes bitbake report rather than run real tasks
    cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
    env = dict(d.getVar('BB_ORIGENV', False))
    env.pop('BUILDDIR', '')
    env.pop('BBPATH', '')
    # Strip the current bitbake/bin from PATH so the nested invocation
    # uses its own setup
    pathitems = env['PATH'].split(':')
    env['PATH'] = ':'.join([item for item in pathitems if not item.endswith('/bitbake/bin')])
    bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')
diff --git a/meta-xilinx-core/lib/oe/cve_check.py b/meta-xilinx-core/lib/oe/cve_check.py new file mode 100644 index 00000000..ed5c714c --- /dev/null +++ b/meta-xilinx-core/lib/oe/cve_check.py | |||
| @@ -0,0 +1,245 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | import collections | ||
| 8 | import re | ||
| 9 | import itertools | ||
| 10 | import functools | ||
| 11 | |||
| 12 | _Version = collections.namedtuple( | ||
| 13 | "_Version", ["release", "patch_l", "pre_l", "pre_v"] | ||
| 14 | ) | ||
| 15 | |||
@functools.total_ordering
class Version():
    """
    Comparable version string, used for CVE version-range checks.

    The optional suffix argument selects how trailing suffixes are parsed:
    - "alphabetical": a single trailing letter (e.g. openssl 1.0.2g)
    - "patch": an explicit p/patch number (e.g. openssh 8.3p1)
    Any other value ignores suffixes.  Pre-release tags (rc/alpha/beta/
    pre/preview/dev) are always recognised and sort before final releases.
    """

    def __init__(self, version, suffix=None):

        # Suffix kinds that contribute a patch_l component to comparisons
        suffixes = ["alphabetical", "patch"]

        if str(suffix) == "alphabetical":
            version_pattern =  r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
        elif str(suffix) == "patch":
            version_pattern =  r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
        else:
            version_pattern =  r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
        regex = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE)

        match = regex.search(version)
        if not match:
            raise Exception("Invalid version: '{0}'".format(version))

        self._version = _Version(
            # Dashes between numbers are treated the same as dots
            release=tuple(int(i) for i in match.group("release").replace("-",".").split(".")),
            patch_l=match.group("patch_l") if str(suffix) in suffixes and match.group("patch_l") else "",
            pre_l=match.group("pre_l"),
            pre_v=match.group("pre_v")
        )

        # Precompute the key used by the comparison operators below
        self._key = _cmpkey(
            self._version.release,
            self._version.patch_l,
            self._version.pre_l,
            self._version.pre_v
        )

    def __eq__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key == other._key

    def __gt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key > other._key
| 58 | |||
| 59 | def _cmpkey(release, patch_l, pre_l, pre_v): | ||
| 60 | # remove leading 0 | ||
| 61 | _release = tuple( | ||
| 62 | reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) | ||
| 63 | ) | ||
| 64 | |||
| 65 | _patch = patch_l.upper() | ||
| 66 | |||
| 67 | if pre_l is None and pre_v is None: | ||
| 68 | _pre = float('inf') | ||
| 69 | else: | ||
| 70 | _pre = float(pre_v) if pre_v else float('-inf') | ||
| 71 | return _release, _patch, _pre | ||
| 72 | |||
| 73 | |||
def get_patched_cves(d):
    """
    Get patches that solve CVEs using the "CVE: " tag.

    Returns a set of CVE IDs collected from three sources:
    - CVE IDs embedded in the recipe's patch file names
    - "CVE: CVE-YYYY-NNNN" tag lines inside the patch contents
    - CVE_STATUS entries whose mapped status is "Patched"
    """

    import re
    import oe.patch

    # Matches a "CVE:" tag followed by one or more CVE IDs
    cve_match = re.compile(r"CVE:( CVE-\d{4}-\d+)+")

    # Matches the last "CVE-YYYY-ID" in the file name, also if written
    # in lowercase. Possible to have multiple CVE IDs in a single
    # file name, but only the last one will be detected from the file name.
    # However, patch files contents addressing multiple CVE IDs are supported
    # (cve_match regular expression)
    cve_file_name_match = re.compile(r".*(CVE-\d{4}-\d+)", re.IGNORECASE)

    patched_cves = set()
    patches = oe.patch.src_patches(d)
    bb.debug(2, "Scanning %d patches for CVEs" % len(patches))
    for url in patches:
        # decodeurl()[2] is the local path of the fetched patch
        patch_file = bb.fetch.decodeurl(url)[2]

        # Check patch file name for CVE ID
        fname_match = cve_file_name_match.search(patch_file)
        if fname_match:
            cve = fname_match.group(1).upper()
            patched_cves.add(cve)
            bb.debug(2, "Found %s from patch file name %s" % (cve, patch_file))

        # Remote patches won't be present and compressed patches won't be
        # unpacked, so say we're not scanning them
        if not os.path.isfile(patch_file):
            bb.note("%s is remote or compressed, not scanning content" % patch_file)
            continue

        with open(patch_file, "r", encoding="utf-8") as f:
            try:
                patch_text = f.read()
            except UnicodeDecodeError:
                # Retry with a legacy single-byte encoding if the patch
                # is not valid UTF-8
                bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
                        " trying with iso8859-1" %  patch_file)
                f.close()
                with open(patch_file, "r", encoding="iso8859-1") as f:
                    patch_text = f.read()

        # Search for one or more "CVE: " lines
        text_match = False
        for match in cve_match.finditer(patch_text):
            # Get only the CVEs without the "CVE: " tag
            cves = patch_text[match.start()+5:match.end()]
            for cve in cves.split():
                bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
                patched_cves.add(cve)
                text_match = True

        if not fname_match and not text_match:
            bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)

    # Search for additional patched CVEs declared via CVE_STATUS flags
    for cve in (d.getVarFlags("CVE_STATUS") or {}):
        decoded_status, _, _ = decode_cve_status(d, cve)
        if decoded_status == "Patched":
            bb.debug(2, "CVE %s is additionally patched" % cve)
            patched_cves.add(cve)

    return patched_cves
| 141 | |||
| 142 | |||
def get_cpe_ids(cve_product, version):
    """
    Return a list of CPE 2.3 identifier strings, one per whitespace-
    separated product in cve_product, for the given version.
    """
    # Drop any "+git<rev>" suffix from the version
    version = version.split("+git")[0]

    cpe_ids = []
    for product in cve_product.split():
        # CVE_PRODUCT entries may carry vendor info as "vendor:product";
        # otherwise the vendor field is a wildcard.
        vendor = "*"
        if ":" in product:
            vendor, product = product.split(":", 1)

        cpe_ids.append('cpe:2.3:*:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version))

    return cpe_ids
| 163 | |||
def cve_check_merge_jsons(output, data):
    """
    Append data's single "package" entry onto output's "package" list.
    Refuses to merge if the format versions differ or the package name
    is already present.
    """
    if output["version"] != data["version"]:
        bb.error("Version mismatch when merging JSON outputs")
        return

    incoming = data["package"][0]
    for product in output["package"]:
        if product["name"] == incoming["name"]:
            bb.error("Error adding the same package %s twice" % product["name"])
            return

    output["package"].append(incoming)
| 179 | |||
def update_symlinks(target_path, link_path):
    """
    Ensure link_path is a symlink pointing at target_path (by basename).
    An existing resolvable link is removed and recreated.
    """
    if link_path == target_path or not os.path.exists(target_path):
        return
    # Remove the old link only if it currently resolves to something
    if os.path.exists(os.path.realpath(link_path)):
        os.remove(link_path)
    os.symlink(os.path.basename(target_path), link_path)
| 189 | |||
| 190 | |||
def convert_cve_version(version):
    """
    Convert an NVD-style version_update string into Yocto version format.
    eg 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1

    The NVD database stores versions with updates as "version_update"
    (e.g. 8.3_p1 for OpenSSH 8.3p1, 6.2_rc1 for linux 6.2-rc1), while
    recipes use the upstream spelling.  Release-candidate ("rc") updates
    keep a '-' separator; "p" updates drop the separator entirely.
    Anything not matching the version_update shape is returned unchanged.
    """
    import re

    match = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
    if not match:
        return version

    base = match.group(1)
    update = match.group(2)
    # "rc" keeps a dash separator; "p" updates are joined directly
    separator = '-' if match.group(3) == "rc" else ''
    return base + separator + update
| 227 | |||
def decode_cve_status(d, cve):
    """
    Convert a CVE_STATUS flag value into (status, detail, description).

    The flag format is "<detail>[: <description>]"; detail is mapped to a
    coarse status via CVE_CHECK_STATUSMAP, defaulting to "Unpatched" for
    unknown details.
    """
    status = d.getVarFlag("CVE_STATUS", cve)
    if not status:
        return ("", "", "")

    detail, _, description = status.partition(':')
    description = description.strip()

    status_mapping = d.getVarFlag("CVE_CHECK_STATUSMAP", detail)
    if status_mapping is None:
        bb.warn('Invalid detail "%s" for CVE_STATUS[%s] = "%s", fallback to Unpatched' % (detail, cve, status))
        status_mapping = "Unpatched"

    return (status_mapping, detail, description)
diff --git a/meta-xilinx-core/lib/oe/data.py b/meta-xilinx-core/lib/oe/data.py new file mode 100644 index 00000000..37121cfa --- /dev/null +++ b/meta-xilinx-core/lib/oe/data.py | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import json | ||
| 8 | import oe.maketype | ||
| 9 | |||
def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction.

    The variable's 'type' flag selects the oe.maketype constructor; all
    other (expanded) flags are passed to it as keyword arguments.
    Invalid type/flag combinations abort the build via bb.msg.fatal.
    """
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is not None:
        # Expand flag values so constructors receive final strings
        flags = dict((flag, d.expand(value))
                     for flag, value in list(flags.items()))
    else:
        flags = {}

    try:
        # An unset variable is treated as the empty string
        return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
    except (TypeError, ValueError) as exc:
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
| 25 | |||
def export2json(d, json_file, expand=True, searchString="",replaceString=""):
    """
    Serialize the exportable variables of datastore d to json_file,
    substituting searchString with replaceString in each value.

    Internal variables (leading "_", "BB*", "B_pn*", "do_*") and
    shell/python functions are skipped; values that fail to expand are
    exported as the empty string.
    """
    skip_prefixes = ("_", "BB", "B_pn", "do_")
    data2export = {}

    for key in d.keys():
        if key.startswith(skip_prefixes):
            continue
        if d.getVarFlag(key, "func"):
            # Functions are code, not exportable data
            continue
        try:
            data2export[key] = d.getVar(key, expand).replace(searchString, replaceString)
        except bb.data_smart.ExpansionError:
            data2export[key] = ''
        except AttributeError:
            # getVar() returned None (no .replace); drop the key entirely
            pass

    with open(json_file, "w") as f:
        json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)
diff --git a/meta-xilinx-core/lib/oe/distro_check.py b/meta-xilinx-core/lib/oe/distro_check.py new file mode 100644 index 00000000..3494520f --- /dev/null +++ b/meta-xilinx-core/lib/oe/distro_check.py | |||
| @@ -0,0 +1,314 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
def create_socket(url, d):
    """Open url for reading, honouring proxy settings from the datastore d."""
    # Import the submodule explicitly: "import urllib" alone does NOT make
    # urllib.request available; it only worked if another module had
    # already imported urllib.request into the urllib package.
    import urllib.request
    from bb.utils import export_proxies

    # Export proxy variables from the datastore into the environment so
    # urllib's default opener picks them up
    export_proxies(d)
    return urllib.request.urlopen(url)
| 13 | |||
def get_links_from_url(url, d):
    "Return all the href links found on the web location"

    from bs4 import BeautifulSoup, SoupStrainer

    # Only parse <a> tags; everything else on the page is irrelevant
    page = create_socket(url, d)
    soup = BeautifulSoup(page, "html.parser", parse_only=SoupStrainer("a"))
    return [anchor['href'].strip('/') for anchor in soup.find_all('a', href=True)]
| 24 | |||
def find_latest_numeric_release(url, d):
    """Return the link text of the highest numeric release listed at url,
    or "" if none of the links are numeric."""
    best = 0
    beststr = ""
    for link in get_links_from_url(url, d):
        try:
            # TODO use bb.utils.vercmp_string_op()
            release = float(link)
        except ValueError:
            # Only ValueError is expected from float(); the original bare
            # except also swallowed KeyboardInterrupt/SystemExit
            release = 0
        if release > best:
            best = release
            beststr = link
    return beststr
| 39 | |||
def is_src_rpm(name):
    "Check if the link is pointing to a src.rpm file"
    suffix = ".src.rpm"
    return name[-len(suffix):] == suffix
| 43 | |||
def package_name_from_srpm(srpm):
    "Strip out the package name from the src.rpm filename"

    # ca-certificates-2016.2.7-1.0.fc24.src.rpm
    # ^name ^ver ^release^removed
    base = srpm.replace(".src.rpm", "")
    name, _version, _release = base.rsplit("-", 2)
    return name
| 51 | |||
def get_source_package_list_from_url(url, section, d):
    "Return a sectioned list of package names from a URL list"

    bb.note("Reading %s: %s" % (url, section))
    new_pkgs = set()
    for link in get_links_from_url(url, d):
        if is_src_rpm(link):
            # Tag each package name with its repository section
            new_pkgs.add(package_name_from_srpm(link) + ":" + section)
    return new_pkgs
| 64 | |||
def get_source_package_list_from_url_by_letter(url, section, d):
    """Aggregate package lists from per-letter subfolders (a-z, 0-9) of url."""
    import string
    from urllib.error import HTTPError
    packages = set()
    for letter in string.ascii_lowercase + string.digits:
        # Not all subfolders may exist, so silently handle 404
        try:
            packages |= get_source_package_list_from_url("%s/%s" % (url, letter), section, d)
        except HTTPError as e:
            if e.code != 404:
                raise
    return packages
| 76 | |||
def get_latest_released_fedora_source_package_list(d):
    "Returns the latest release and the set of all package names in the latest Fedora distro"
    latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
    # Release packages live in per-letter subfolders of the release tree...
    package_names = get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
    # ...and updates are merged in under the "updates" section
    package_names |= get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
    return latest, package_names
| 83 | |||
def get_latest_released_opensuse_source_package_list(d):
    "Returns the latest release and the set of all package names in the latest openSUSE Leap distro"
    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/leap", d)

    package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/leap/%s/repo/oss/suse/src/" % latest, "main", d)
    # Merge in the update repository under the "updates" section
    package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/leap/%s/oss/src/" % latest, "updates", d)
    return latest, package_names
| 91 | |||
def get_latest_released_clear_source_package_list(d):
    """Return the latest release and the set of all package names in the
    latest Clear Linux release (main section only)."""
    latest = find_latest_numeric_release("https://download.clearlinux.org/releases/", d)
    package_names = get_source_package_list_from_url("https://download.clearlinux.org/releases/%s/clear/source/SRPMS/" % latest, "main", d)
    return latest, package_names
| 96 | |||
def find_latest_debian_release(url, d):
    """Find the latest listed "DebianNN" release on the given url, or
    "_NotFound_" if no such links exist.

    NOTE(review): the sort is lexicographic on the version string, which
    misorders e.g. "9" vs "10" — preserved as-is since callers only use
    the result as a label.
    """
    releases = [link.replace("Debian", "")
                for link in get_links_from_url(url, d)
                if link.startswith("Debian")]
    releases.sort()
    try:
        return releases[-1]
    except IndexError:
        # Only an empty list is expected here; the original bare except
        # also hid unrelated failures
        return "_NotFound_"
| 108 | |||
def get_debian_style_source_package_list(url, section, d):
    "Return the list of package-names stored in the debian style Sources.gz file"
    import gzip

    package_names = set()
    # Close the gzip stream deterministically; the original leaked it
    # until garbage collection
    with gzip.open(create_socket(url, d), mode="rt") as sources:
        for line in sources:
            if line.startswith("Package:"):
                pkg = line.split(":", 1)[1].strip()
                package_names.add(pkg + ":" + section)
    return package_names
| 119 | |||
def get_latest_released_debian_source_package_list(d):
    "Returns list of all the name of packages in the latest debian distro"
    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
    # NOTE(review): the package lists are fetched from the "stable" alias
    # rather than the release found above; "latest" is only used as a label
    url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
    package_names = get_debian_style_source_package_list(url, "main", d)
    url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
    package_names |= get_debian_style_source_package_list(url, "updates", d)
    return latest, package_names
| 128 | |||
def find_latest_ubuntu_release(url, d):
    """
    Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.

    To avoid matching development releases look for distributions that have
    updates, so the resulting distro could be any supported release.
    """
    url += "?C=M;O=D"  # Descending Sort by Last Modified
    for link in get_links_from_url(url, d):
        if "-updates" in link:
            # First match is the most recently modified -updates dist
            return link.replace("-updates", "")
    return "_NotFound_"
| 142 | |||
def get_latest_released_ubuntu_source_package_list(d):
    "Returns the latest release and the set of all package names in the latest Ubuntu distro"
    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
    url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
    package_names = get_debian_style_source_package_list(url, "main", d)
    # Merge in the -updates pocket under the "updates" section
    url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
    package_names |= get_debian_style_source_package_list(url, "updates", d)
    return latest, package_names
| 151 | |||
def create_distro_packages_list(distro_check_dir, d):
    """
    Regenerate the per-distro package list files under
    <distro_check_dir>/package_lists, one "<Distro>-<release>" file per
    distro, each containing sorted "package:section" lines.
    """
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    bb.utils.remove(pkglst_dir, True)
    bb.utils.mkdirhier(pkglst_dir)

    per_distro_functions = (
        ("Debian", get_latest_released_debian_source_package_list),
        ("Ubuntu", get_latest_released_ubuntu_source_package_list),
        ("Fedora", get_latest_released_fedora_source_package_list),
        ("openSUSE", get_latest_released_opensuse_source_package_list),
        ("Clear", get_latest_released_clear_source_package_list),
    )

    for name, fetcher_func in per_distro_functions:
        try:
            release, package_list = fetcher_func(d)
        except Exception as e:
            bb.warn("Cannot fetch packages for %s: %s" % (name, e))
            # Skip this distro: without this continue the code below
            # referenced release/package_list from a previous iteration
            # (or raised NameError on the first failure)
            continue
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        if len(package_list) == 0:
            bb.error("Didn't fetch any packages for %s %s" % (name, release))

        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        with open(package_list_file, 'w') as f:
            for pkg in sorted(package_list):
                f.write(pkg + "\n")
| 180 | |||
def update_distro_data(distro_check_dir, datetime, d):
    """
    If distro packages list data is old then rebuild it.
    The operation is protected by an fcntl file lock so that
    only one process performs it at a time.
    """
    if not os.path.isdir(distro_check_dir):
        try:
            bb.note("Making new directory: %s" % distro_check_dir)
            os.makedirs(distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))


    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    f = None
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+').close() # touch the file so that the next open won't fail

        f = open(datetime_file, "r+")
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        # Compare only the date portion (first 8 chars, YYYYMMDD)
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir, d)
            f.seek(0)
            f.write(datetime)

    except OSError as e:
        raise Exception('Unable to open timestamp: %s' % e)
    finally:
        # f is None if open() itself failed; the original referenced the
        # unbound name here, raising NameError and masking the real error
        if f is not None:
            fcntl.lockf(f, fcntl.LOCK_UN)
            f.close()
| 217 | |||
def compare_in_distro_packages_list(distro_check_dir, d):
    """
    Return the list of "Distro-section" strings whose cached package lists
    contain this recipe (or its DISTRO_PN_ALIAS alias), plus any raw
    DISTRO_PN_ALIAS tokens.
    """
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")

    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = recipe_name = d.getVar('PN')
    bb.note("Checking: %s" % pn)

    # Strip recipe-class decorations (-native, nativesdk-, -cross, -initial)
    # so the base recipe name is compared, and set OVERRIDES so the base
    # recipe's pn- overrides (e.g. DISTRO_PN_ALIAS) take effect.
    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]

    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[1]

    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]

    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]

    bb.note("Recipe: %s" % recipe_name)

    # Tokens in DISTRO_PN_ALIAS that name a known "distro exception" are
    # reported directly as matches
    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
    tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
    # NOTE(review): the loop variable shadows the builtin str()
    for str in tmp.split():
        if str and str.find("=") == -1 and distro_exceptions[str]:
            matching_distros.append(str)

    # "distro=alias" tokens map a distro name to the package name it uses
    distro_pn_aliases = {}
    for str in tmp.split():
        if "=" in str:
            (dist, pn_alias) = str.split('=')
            distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()

    # Each cached file is named "<Distro>-<release>" and holds
    # "package:section" lines
    for file in os.listdir(pkglst_dir):
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "r")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                # NOTE(review): the file is closed again after the loop —
                # the double close() is harmless but redundant
                f.close()
                break
        f.close()

    # Append every raw alias token as well (including "distro=alias" ones)
    for item in tmp.split():
        matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros
| 281 | |||
def create_log_file(d, logname):
    """
    Create a DATETIME-stamped log file under LOG_DIR, point a stable
    symlink named logname at it, and record it in LOG_FILE.
    """
    logpath = d.getVar('LOG_DIR')
    bb.utils.mkdirhier(logpath)
    stem, suffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (stem, d.getVar('DATETIME'), suffix))
    if not os.path.exists(logfile):
        slogfile = os.path.join(logpath, logname)
        # Replace any stale symlink from a previous run
        if os.path.exists(slogfile):
            os.remove(slogfile)
        open(logfile, 'w+').close()
        os.symlink(logfile, slogfile)
    d.setVar('LOG_FILE', logfile)
    return logfile
| 294 | return logfile | ||
| 295 | |||
| 296 | |||
def save_distro_check_result(result, datetime, result_file, d):
    """
    Append one CSV line "<PN>,<result items...>" to result_file, holding
    an exclusive fcntl lock while writing.
    """
    pn = d.getVar('PN')
    logdir = d.getVar('LOG_DIR')
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    bb.utils.mkdirhier(logdir)

    line = ",".join([pn] + [i for i in result])
    import fcntl
    f = open(result_file, "a")
    fcntl.lockf(f, fcntl.LOCK_EX)
    f.seek(0, os.SEEK_END) # seek to the end of file
    f.write(line + "\n")
    fcntl.lockf(f, fcntl.LOCK_UN)
    f.close()
diff --git a/meta-xilinx-core/lib/oe/elf.py b/meta-xilinx-core/lib/oe/elf.py new file mode 100644 index 00000000..eab2349a --- /dev/null +++ b/meta-xilinx-core/lib/oe/elf.py | |||
| @@ -0,0 +1,145 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
def machine_dict(d):
    """Return the table of known ELF targets.

    Maps TARGET_OS -> TARGET_ARCH -> (EM machine number, OSABI, ABI version,
    little-endian?, bitsize).  *d* may be None; when a datastore is supplied,
    extra entries can be injected by functions named in the
    PACKAGEQA_EXTRA_MACHDEFFUNCS variable (e.g. from a BSP layer).
    """
    #           TARGET_OS  TARGET_ARCH   MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
    machdata = {
            "darwin9" : {
                        "arm" : (40, 0, 0, True, 32),
                      },
            "eabi" : {
                        "arm" : (40, 0, 0, True, 32),
                      },
            "elf" : {
                        "aarch64" : (183, 0, 0, True, 64),
                        "aarch64_be" :(183, 0, 0, False, 64),
                        "i586" : (3, 0, 0, True, 32),
                        "i686" : (3, 0, 0, True, 32),
                        "x86_64": (62, 0, 0, True, 64),
                        "epiphany": (4643, 0, 0, True, 32),
                        "lm32": (138, 0, 0, False, 32),
                        "loongarch64":(258, 0, 0, True, 64),
                        "mips": ( 8, 0, 0, False, 32),
                        "mipsel": ( 8, 0, 0, True, 32),
                        "microblaze": (189, 0, 0, False, 32),
                        "microblazeel":(189, 0, 0, True, 32),
                        "powerpc": (20, 0, 0, False, 32),
                        "riscv32": (243, 0, 0, True, 32),
                        "riscv64": (243, 0, 0, True, 64),
                      },
            "linux" : {
                        "aarch64" : (183, 0, 0, True, 64),
                        "aarch64_be" :(183, 0, 0, False, 64),
                        "arm" : (40, 97, 0, True, 32),
                        "armeb": (40, 97, 0, False, 32),
                        "powerpc": (20, 0, 0, False, 32),
                        "powerpc64": (21, 0, 0, False, 64),
                        "powerpc64le": (21, 0, 0, True, 64),
                        "i386": ( 3, 0, 0, True, 32),
                        "i486": ( 3, 0, 0, True, 32),
                        "i586": ( 3, 0, 0, True, 32),
                        "i686": ( 3, 0, 0, True, 32),
                        "x86_64": (62, 0, 0, True, 64),
                        "ia64": (50, 0, 0, True, 64),
                        "alpha": (36902, 0, 0, True, 64),
                        "hppa": (15, 3, 0, False, 32),
                        "loongarch64":(258, 0, 0, True, 64),
                        "m68k": ( 4, 0, 0, False, 32),
                        "mips": ( 8, 0, 0, False, 32),
                        "mipsel": ( 8, 0, 0, True, 32),
                        "mips64": ( 8, 0, 0, False, 64),
                        "mips64el": ( 8, 0, 0, True, 64),
                        "mipsisa32r6": ( 8, 0, 0, False, 32),
                        "mipsisa32r6el": ( 8, 0, 0, True, 32),
                        "mipsisa64r6": ( 8, 0, 0, False, 64),
                        "mipsisa64r6el": ( 8, 0, 0, True, 64),
                        "nios2": (113, 0, 0, True, 32),
                        "riscv32": (243, 0, 0, True, 32),
                        "riscv64": (243, 0, 0, True, 64),
                        "s390": (22, 0, 0, False, 32),
                        "sh4": (42, 0, 0, True, 32),
                        "sparc": ( 2, 0, 0, False, 32),
                        "microblaze": (189, 0, 0, False, 32),
                        "microblazeel":(189, 0, 0, True, 32),
                      },
            "linux-android" : {
                        "aarch64" : (183, 0, 0, True, 64),
                        "i686": ( 3, 0, 0, True, 32),
                        "x86_64": (62, 0, 0, True, 64),
                      },
            "linux-androideabi" : {
                        "arm" : (40, 97, 0, True, 32),
                      },
            "linux-musl" : {
                        "aarch64" : (183, 0, 0, True, 64),
                        "aarch64_be" :(183, 0, 0, False, 64),
                        "arm" : ( 40, 97, 0, True, 32),
                        "armeb": ( 40, 97, 0, False, 32),
                        "powerpc": ( 20, 0, 0, False, 32),
                        "powerpc64": ( 21, 0, 0, False, 64),
                        "powerpc64le": (21, 0, 0, True, 64),
                        "i386": ( 3, 0, 0, True, 32),
                        "i486": ( 3, 0, 0, True, 32),
                        "i586": ( 3, 0, 0, True, 32),
                        "i686": ( 3, 0, 0, True, 32),
                        "x86_64": ( 62, 0, 0, True, 64),
                        "mips": ( 8, 0, 0, False, 32),
                        "mipsel": ( 8, 0, 0, True, 32),
                        "mips64": ( 8, 0, 0, False, 64),
                        "mips64el": ( 8, 0, 0, True, 64),
                        "microblaze": (189, 0, 0, False, 32),
                        "microblazeel":(189, 0, 0, True, 32),
                        "riscv32": (243, 0, 0, True, 32),
                        "riscv64": (243, 0, 0, True, 64),
                        "sh4": ( 42, 0, 0, True, 32),
                      },
            "uclinux-uclibc" : {
                        "bfin": ( 106, 0, 0, True, 32),
                      },
            "linux-gnueabi" : {
                        "arm" : (40, 0, 0, True, 32),
                        "armeb" : (40, 0, 0, False, 32),
                      },
            "linux-musleabi" : {
                        "arm" : (40, 0, 0, True, 32),
                        "armeb" : (40, 0, 0, False, 32),
                      },
            "linux-gnuspe" : {
                        "powerpc": (20, 0, 0, False, 32),
                      },
            "linux-muslspe" : {
                        "powerpc": (20, 0, 0, False, 32),
                      },
            "linux-gnu" : {
                        "powerpc": (20, 0, 0, False, 32),
                        "sh4": (42, 0, 0, True, 32),
                      },
            "linux-gnu_ilp32" : {
                        "aarch64" : (183, 0, 0, True, 32),
                      },
            "linux-gnux32" : {
                        "x86_64": (62, 0, 0, True, 32),
                      },
            "linux-muslx32" : {
                        "x86_64": (62, 0, 0, True, 32),
                      },
            "linux-gnun32" : {
                        "mips64": ( 8, 0, 0, False, 32),
                        "mips64el": ( 8, 0, 0, True, 32),
                        "mipsisa64r6": ( 8, 0, 0, False, 32),
                        "mipsisa64r6el":( 8, 0, 0, True, 32),
                      },
        }

    # Add in any extra user supplied data which may come from a BSP layer, removing the
    # need to always change this class directly.
    # Fix: the original wrote d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS" or None) —
    # the "or None" applied to the (always truthy) string literal and was a
    # no-op; the fallback belongs outside the getVar() call.
    extra_machdata = (d and d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
    for m in extra_machdata:
        call = m + "(machdata, d)"
        locs = { "machdata" : machdata, "d" : d}
        machdata = bb.utils.better_eval(call, locs)

    return machdata
diff --git a/meta-xilinx-core/lib/oe/go.py b/meta-xilinx-core/lib/oe/go.py new file mode 100644 index 00000000..dfd957d1 --- /dev/null +++ b/meta-xilinx-core/lib/oe/go.py | |||
| @@ -0,0 +1,34 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | import re | ||
| 8 | |||
def map_arch(a):
    """Map an OE target architecture name to the corresponding GOARCH value.

    Returns '' for architectures Go does not support.
    """
    # Prefix patterns, tried in order (re.match anchors at the start only,
    # matching the original behaviour).
    prefix_rules = (
        (r'i.86', '386'),
        (r'arm.*', 'arm'),
        (r'aarch64.*', 'arm64'),
        (r'mips64el.*', 'mips64le'),
        (r'mips64.*', 'mips64'),
        (r'p(pc|owerpc)(64le)', 'ppc64le'),
        (r'p(pc|owerpc)(64)', 'ppc64'),
    )
    # Names that must match exactly.
    exact_rules = {
        'x86_64': 'amd64',
        'mips': 'mips',
        'mipsel': 'mipsle',
        'riscv64': 'riscv64',
        'loongarch64': 'loong64',
    }
    for pattern, goarch in prefix_rules:
        if re.match(pattern, a):
            return goarch
    return exact_rules.get(a, '')
diff --git a/meta-xilinx-core/lib/oe/gpg_sign.py b/meta-xilinx-core/lib/oe/gpg_sign.py new file mode 100644 index 00000000..ede6186c --- /dev/null +++ b/meta-xilinx-core/lib/oe/gpg_sign.py | |||
| @@ -0,0 +1,160 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | """Helper module for GPG signing""" | ||
| 8 | |||
| 9 | import bb | ||
| 10 | import os | ||
| 11 | import shlex | ||
| 12 | import subprocess | ||
| 13 | import tempfile | ||
| 14 | |||
class LocalSigner(object):
    """Class for handling local (on the build host) signing"""

    def __init__(self, d):
        # GPG_BIN from the datastore wins; fall back to whatever is on PATH.
        self.gpg_bin = d.getVar('GPG_BIN') or \
                  bb.utils.which(os.getenv('PATH'), 'gpg')
        self.gpg_cmd = [self.gpg_bin]
        self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
        # Without this we see "Cannot allocate memory" errors when running processes in parallel
        # It needs to be set for any gpg command since any agent launched can stick around in memory
        # and this parameter must be set.
        if self.gpg_agent_bin:
            self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
        self.gpg_path = d.getVar('GPG_PATH')
        self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
        self.gpg_version = self.get_gpg_version()

    def export_pubkey(self, output_file, keyid, armor=True):
        """Export GPG public key *keyid* to *output_file* (ASCII-armored by default)."""
        cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
        if self.gpg_path:
            cmd += ["--homedir", self.gpg_path]
        if armor:
            cmd += ["--armor"]
        cmd += [keyid]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)

    def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
        """Sign RPM files with rpmsign, *sign_chunk* files per invocation."""
        cmd = self.rpm_bin + " --addsign --define '_gpg_name %s'  " % keyid
        gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
        if self.gpg_version > (2,1,):
            gpg_args += ' --pinentry-mode=loopback'
        cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
        cmd += "--define '_binary_filedigest_algorithm %s' " % digest
        if self.gpg_bin:
            cmd += "--define '__gpg %s' " % self.gpg_bin
        if self.gpg_path:
            cmd += "--define '_gpg_path %s' " % self.gpg_path
        if fsk:
            cmd += "--signfiles --fskpath %s " % fsk
            if fsk_password:
                cmd += "--define '_file_signing_key_password %s' " % fsk_password

        # Sign in chunks
        for i in range(0, len(files), sign_chunk):
            subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)

    def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True, output_suffix=None, use_sha256=False):
        """Create a detached signature of a file"""

        if passphrase_file and passphrase:
            # Fixed typo in the message: "of" -> "or".
            raise Exception("You should use either passphrase_file or passphrase, not both")

        cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
                              '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]

        if self.gpg_path:
            cmd += ['--homedir', self.gpg_path]
        if armor:
            cmd += ['--armor']
        if use_sha256:
            cmd += ['--digest-algo', "SHA256"]

        # gpg > 2.1 supports password pipes only through the loopback interface
        # gpg < 2.1 errors out if given unknown parameters
        if self.gpg_version > (2,1,):
            cmd += ['--pinentry-mode', 'loopback']

        try:
            if passphrase_file:
                with open(passphrase_file) as fobj:
                    passphrase = fobj.readline()

            if not output_suffix:
                output_suffix = 'asc' if armor else 'sig'
            output_file = input_file + "." + output_suffix
            # Sign into a temp file in the destination directory, then rename,
            # so a partially-written signature is never left in place.
            with tempfile.TemporaryDirectory(dir=os.path.dirname(output_file)) as tmp_dir:
                tmp_file = os.path.join(tmp_dir, os.path.basename(output_file))
                cmd += ['-o', tmp_file]

                cmd += [input_file]

                job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                (_, stderr) = job.communicate(passphrase.encode("utf-8"))

                if job.returncode:
                    bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))

                os.rename(tmp_file, output_file)
        except IOError as e:
            bb.error("IO error (%s): %s" % (e.errno, e.strerror))
            raise Exception("Failed to sign '%s'" % input_file)

        except OSError as e:
            bb.error("OS error (%s): %s" % (e.errno, e.strerror))
            # Fixed the unterminated quote in the message ("'%s" -> "'%s'").
            raise Exception("Failed to sign '%s'" % input_file)

    def get_gpg_version(self):
        """Return the gpg version as a tuple of ints"""
        try:
            cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
            ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
            # Drop any "-unknown" style suffix before parsing the dotted version.
            return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not get gpg version: %s" % e)

    def verify(self, sig_file, valid_sigs = ''):
        """Verify a detached signature.

        With no *valid_sigs*, any good signature is accepted; otherwise the
        signing key must be one of the whitespace-separated entries.
        """
        cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"]
        if self.gpg_path:
            cmd += ["--homedir", self.gpg_path]

        cmd += [sig_file]
        status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Valid if any key matches if unspecified
        if not valid_sigs:
            ret = False if status.returncode else True
            return ret

        import re
        goodsigs = []
        sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
        for l in status.stdout.decode("utf-8").splitlines():
            s = sigre.match(l)
            if s:
                goodsigs += [s.group(1)]

        for sig in valid_sigs.split():
            if sig in goodsigs:
                return True
        if len(goodsigs):
            bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs))
        return False
| 152 | |||
| 153 | |||
def get_signer(d, backend):
    """Get signer object for the specified backend"""
    # Only local (build-host) signing is implemented.
    if backend == 'local':
        return LocalSigner(d)
    bb.fatal("Unsupported signing backend '%s'" % backend)
diff --git a/meta-xilinx-core/lib/oe/license.py b/meta-xilinx-core/lib/oe/license.py new file mode 100644 index 00000000..d9c8d94d --- /dev/null +++ b/meta-xilinx-core/lib/oe/license.py | |||
| @@ -0,0 +1,261 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | """Code for parsing OpenEmbedded license strings""" | ||
| 7 | |||
| 8 | import ast | ||
| 9 | import re | ||
| 10 | from fnmatch import fnmatchcase as fnmatch | ||
| 11 | |||
def license_ok(license, dont_want_licenses):
    """ Return False if License exist in dont_want_licenses else True """
    # Patterns are fnmatch-style globs, matched case-sensitively.
    return not any(fnmatch(license, dwl) for dwl in dont_want_licenses)
| 18 | |||
def obsolete_license_list():
    """Return the list of deprecated/non-SPDX license identifiers."""
    return [
        # AGPL
        "AGPL-3", "AGPL-3+", "AGPLv3", "AGPLv3+", "AGPLv3.0", "AGPLv3.0+",
        "AGPL-3.0", "AGPL-3.0+", "BSD-0-Clause",
        # GPL
        "GPL-1", "GPL-1+", "GPLv1", "GPLv1+", "GPLv1.0", "GPLv1.0+",
        "GPL-1.0", "GPL-1.0+", "GPL-2", "GPL-2+", "GPLv2",
        "GPLv2+", "GPLv2.0", "GPLv2.0+", "GPL-2.0", "GPL-2.0+", "GPL-3",
        "GPL-3+", "GPLv3", "GPLv3+", "GPLv3.0", "GPLv3.0+",
        "GPL-3.0", "GPL-3.0+",
        # LGPL
        "LGPLv2", "LGPLv2+", "LGPLv2.0", "LGPLv2.0+", "LGPL-2.0",
        "LGPL-2.0+", "LGPL2.1", "LGPL2.1+",
        "LGPLv2.1", "LGPLv2.1+", "LGPL-2.1", "LGPL-2.1+", "LGPLv3",
        "LGPLv3+", "LGPL-3.0", "LGPL-3.0+",
        # MPL / MIT
        "MPL-1", "MPLv1",
        "MPLv1.1", "MPLv2", "MIT-X", "MIT-style",
        # Misc
        "openssl", "PSF", "PSFv2", "Python-2", "Apachev2", "Apache-2",
        "Artisticv1",
        "Artistic-1", "AFL-2", "AFL-1", "AFLv2", "AFLv1", "CDDLv1",
        "CDDL-1", "EPLv1.0", "FreeType", "Nauman",
        "tcl", "vim", "SGIv1",
    ]
| 28 | |||
class LicenseError(Exception):
    """Base class for OpenEmbedded license-parsing errors."""
    pass
| 31 | |||
class LicenseSyntaxError(LicenseError):
    """Raised when a license string cannot be parsed."""

    def __init__(self, licensestr, exc):
        self.licensestr = licensestr  # the offending license string
        self.exc = exc                # the underlying parse error
        LicenseError.__init__(self)

    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)
| 40 | |||
class InvalidLicense(LicenseError):
    """Raised when a license name contains disallowed characters."""

    def __init__(self, license):
        self.license = license  # the rejected token
        LicenseError.__init__(self)

    def __str__(self):
        return "invalid characters in license '%s'" % self.license
| 48 | |||
# Characters that act as operators/separators in OE license expressions.
license_operator_chars = '&|() '
license_operator = re.compile(r'([' + license_operator_chars + '])')
# Valid characters for a single license name.
license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')

class LicenseVisitor(ast.NodeVisitor):
    """Base visitor for OpenEmbedded license strings.

    License strings are tokenized, license names quoted, and the result fed
    through ast.parse() so the '&'/'|' operators become BinOp nodes.
    """

    def get_elements(self, licensestr):
        """Tokenize *licensestr* into quoted license names and operators,
        inserting an implicit '&' between adjacent license names."""
        tokens = [tok for tok in license_operator.split(licensestr) if tok.strip()]
        out = []
        for idx, tok in enumerate(tokens):
            if license_pattern.match(tok):
                if idx > 0 and license_pattern.match(tokens[idx - 1]):
                    out.append('&')
                out.append('"' + tok + '"')
            elif license_operator.match(tok):
                out.append(tok)
            else:
                raise InvalidLicense(tok)
        return out

    def visit_elements(self, elements):
        """Parse previously tokenized elements and walk the syntax tree."""
        self.visit(ast.parse(' '.join(elements)))

    def visit_string(self, licensestr):
        """Tokenize, parse and walk an OpenEmbedded license string."""
        self.visit_elements(self.get_elements(licensestr))
| 77 | |||
class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by resolving each OR
    ('|') node with the caller-supplied choose_licenses callback."""

    def __init__(self, choose_licenses):
        self.choose_licenses = choose_licenses
        self.licenses = []
        LicenseVisitor.__init__(self)

    # Older Pythons emit ast.Str for string literals, newer ones ast.Constant;
    # handle both.
    def visit_Str(self, node):
        self.licenses.append(node.s)

    def visit_Constant(self, node):
        self.licenses.append(node.value)

    def visit_BinOp(self, node):
        if not isinstance(node.op, ast.BitOr):
            # '&' nodes: just keep collecting both sides.
            self.generic_visit(node)
            return
        # Flatten each side independently, then let the callback pick.
        lhs = FlattenVisitor(self.choose_licenses)
        lhs.visit(node.left)
        rhs = FlattenVisitor(self.choose_licenses)
        rhs.visit(node.right)
        self.licenses.extend(self.choose_licenses(lhs.licenses, rhs.licenses))
| 104 | |||
def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    visitor = FlattenVisitor(choose_licenses)
    try:
        visitor.visit_string(licensestr)
    except SyntaxError as exc:
        # Re-raise with the offending string attached for better diagnostics.
        raise LicenseSyntaxError(licensestr, exc)
    return visitor.licenses
| 113 | |||
def is_included(licensestr, include_licenses=None, exclude_licenses=None):
    """Given a license string, a list of licenses to include and a list of
    licenses to exclude, determine if the license string matches the include
    list and does not match the exclude list.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses that were excluded if state is False, or the licenses that were
    included if the state is True."""

    # Defaults: include everything, exclude nothing.
    include_licenses = include_licenses or ['*']
    exclude_licenses = exclude_licenses or []

    def matches_any(license, patterns):
        return any(fnmatch(license, pattern) for pattern in patterns)

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses and no excluded licenses)."""
        # The factor 1000 below is arbitrary, just expected to be much larger
        # than the number of licenses actually specified. That way the weight
        # will be negative if the list of licenses contains an excluded license,
        # but still gives a higher weight to the list with the most included
        # licenses.
        def weight(lics):
            included = len([lic for lic in lics if matches_any(lic, include_licenses)])
            has_excluded = any(matches_any(lic, exclude_licenses) for lic in lics)
            return included - 1000 * has_excluded

        return alpha if weight(alpha) >= weight(beta) else beta

    licenses = flattened_licenses(licensestr, choose_licenses)
    excluded = [lic for lic in licenses if matches_any(lic, exclude_licenses)]
    if excluded:
        return False, excluded
    return True, [lic for lic in licenses if matches_any(lic, include_licenses)]
| 159 | |||
class ManifestVisitor(LicenseVisitor):
    """Walk license tree (parsed from a string) removing the incompatible
    licenses specified"""
    def __init__(self, dont_want_licenses, canonical_license, d):
        # Patterns of licenses to drop from the expression.
        self._dont_want_licenses = dont_want_licenses
        # Callable mapping (d, license) to its canonical name before matching.
        self._canonical_license = canonical_license
        self._d = d
        # Pending operator tokens ('&', '|', '[', ']') seen since the last
        # license that was kept; flushed into licensestr when the next
        # acceptable license is emitted.
        self._operators = []

        # Accumulated outputs: the kept licenses and the rebuilt expression.
        self.licenses = []
        self.licensestr = ''

        LicenseVisitor.__init__(self)

    def visit(self, node):
        # NOTE: parentheses in the original string were rewritten as list
        # brackets before parsing (see manifest_licenses), so ast.List marks
        # an opening '(' and the list's ast.Load context marks the closing ')'.
        if isinstance(node, ast.Str):
            lic = node.s

            if license_ok(self._canonical_license(self._d, lic),
                    self._dont_want_licenses) == True:
                if self._operators:
                    ops = []
                    # Collapse consecutive '&'/'|' operators (left over from
                    # dropped licenses) so only the most recent one survives;
                    # brackets are always preserved.
                    for op in self._operators:
                        if op == '[':
                            ops.append(op)
                        elif op == ']':
                            ops.append(op)
                        else:
                            if not ops:
                                ops.append(op)
                            elif ops[-1] in ['[', ']']:
                                ops.append(op)
                            else:
                                ops[-1] = op

                    # Emit the collapsed operators; a binary operator is only
                    # written if at least one license was already emitted.
                    for op in ops:
                        if op == '[' or op == ']':
                            self.licensestr += op
                        elif self.licenses:
                            self.licensestr += ' ' + op + ' '

                    self._operators = []

                self.licensestr += lic
                self.licenses.append(lic)
        elif isinstance(node, ast.BitAnd):
            self._operators.append("&")
        elif isinstance(node, ast.BitOr):
            self._operators.append("|")
        elif isinstance(node, ast.List):
            self._operators.append("[")
        elif isinstance(node, ast.Load):
            # Closing bracket is appended directly; it never needs collapsing.
            self.licensestr += "]"

        self.generic_visit(node)
| 215 | |||
def manifest_licenses(licensestr, dont_want_licenses, canonical_license, d):
    """Given a license string and dont_want_licenses list,
    return license string filtered and a list of licenses"""
    visitor = ManifestVisitor(dont_want_licenses, canonical_license, d)

    try:
        # ast has no node type for parentheses, so smuggle them through the
        # parser as list brackets (ast.List / ast.Load) and restore them below.
        elements = ['[' if e == '(' else ']' if e == ')' else e
                    for e in visitor.get_elements(licensestr)]
        visitor.visit_elements(elements)
    except SyntaxError as exc:
        raise LicenseSyntaxError(licensestr, exc)

    # Translate the brackets back into parentheses for the returned string.
    filtered = visitor.licensestr.replace('[', '(').replace(']', ')')

    return (filtered, visitor.licenses)
| 236 | |||
class ListVisitor(LicenseVisitor):
    """Record all different licenses found in the license string"""

    def __init__(self):
        self.licenses = set()

    # Older Pythons emit ast.Str for string literals, newer ones ast.Constant.
    def visit_Str(self, node):
        self.licenses.add(node.s)

    def visit_Constant(self, node):
        self.licenses.add(node.value)
| 247 | |||
def list_licenses(licensestr):
    """Simply get a list of all licenses mentioned in a license string.
    Binary operators are not applied or taken into account in any way"""
    collector = ListVisitor()
    try:
        collector.visit_string(licensestr)
    except SyntaxError as exc:
        raise LicenseSyntaxError(licensestr, exc)
    # Note: returns a set, despite the function name.
    return collector.licenses
| 257 | |||
def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
    """Return remaining bad licenses after removing any package exceptions"""
    # Exceptions are recorded as "pkg:license" entries.
    excepted = set(exceptions)
    return [lic for lic in bad_licenses if "%s:%s" % (pkg, lic) not in excepted]
diff --git a/meta-xilinx-core/lib/oe/lsb.py b/meta-xilinx-core/lib/oe/lsb.py new file mode 100644 index 00000000..3ec03e50 --- /dev/null +++ b/meta-xilinx-core/lib/oe/lsb.py | |||
| @@ -0,0 +1,123 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
def get_os_release():
    """Get all key-value pairs from /etc/os-release as a dict"""
    # NOTE(review): this module does not import os at module level in this
    # copy, so import it locally to avoid a NameError at call time.
    import os
    from collections import OrderedDict

    data = OrderedDict()
    if os.path.exists('/etc/os-release'):
        with open('/etc/os-release') as f:
            for line in f:
                try:
                    key, val = line.rstrip().split('=', 1)
                except ValueError:
                    # Skip blank/malformed lines that contain no '='.
                    continue
                data[key.strip()] = val.strip('"')
    return data
| 21 | |||
def release_dict_osr():
    """ Populate a dict with pertinent values from /etc/os-release """
    os_release = get_os_release()
    data = {}
    # Map os-release keys onto the lsb-style names used by callers.
    for src_key, dst_key in (('ID', 'DISTRIB_ID'),
                             ('VERSION_ID', 'DISTRIB_RELEASE')):
        if src_key in os_release:
            data[dst_key] = os_release[src_key]
    return data
| 32 | |||
def release_dict_lsb():
    """ Return the output of lsb_release -ir as a dictionary """
    from subprocess import PIPE

    try:
        output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
    except bb.process.CmdError:
        return {}

    lsb_map = { 'Distributor ID': 'DISTRIB_ID',
                'Release': 'DISTRIB_RELEASE'}

    data = {}
    for line in output.splitlines():
        # Some lsb_release variants echo an "-e " prefix; strip it.
        if line.startswith("-e"):
            line = line[3:]
        key, sep, value = line.partition(":\t")
        if sep and key in lsb_map:
            data[lsb_map[key]] = value

    # Both fields are required for the result to be usable.
    if len(data) != 2:
        return None

    return data
| 61 | |||
def release_dict_file():
    """ Try to gather release information manually when other methods fail """
    # NOTE(review): this module does not import os at module level in this
    # copy, so import the needed stdlib modules locally.
    import os
    import re

    data = {}
    try:
        if os.path.exists('/etc/lsb-release'):
            with open('/etc/lsb-release') as f:
                for line in f:
                    key, value = line.split("=", 1)
                    data[key] = value.strip()
        elif os.path.exists('/etc/redhat-release'):
            with open('/etc/redhat-release') as f:
                distro = f.readline().strip()
            # e.g. "CentOS Linux release 7.9 (Core)"
            match = re.match(r'(.*) release (.*) \((.*)\)', distro)
            if match:
                data['DISTRIB_ID'] = match.group(1)
                data['DISTRIB_RELEASE'] = match.group(2)
        elif os.path.exists('/etc/SuSE-release'):
            data['DISTRIB_ID'] = 'SUSE LINUX'
            with open('/etc/SuSE-release') as f:
                for line in f:
                    if line.startswith('VERSION = '):
                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
                        break

    except IOError:
        return {}
    return data
| 93 | |||
def distro_identifier(adjust_hook=None):
    """Return a distro identifier string based upon lsb_release -ri,
    with optional adjustment via a hook"""

    import re

    # Try /etc/os-release first, then the output of `lsb_release -ir` and
    # finally fall back on parsing various release files in order to determine
    # host distro name and version.
    distro_data = release_dict_osr() or release_dict_lsb() or release_dict_file()

    distro_id = distro_data.get('DISTRIB_ID', '')
    release = distro_data.get('DISTRIB_RELEASE', '')

    if adjust_hook:
        distro_id, release = adjust_hook(distro_id, release)
    if not distro_id:
        return "unknown"
    # Filter out any non-alphanumerics and convert to lowercase
    distro_id = re.sub(r'\W', '', distro_id).lower()

    id_str = '{0}-{1}'.format(distro_id, release) if release else distro_id
    return id_str.replace(' ', '-').replace('/', '-')
diff --git a/meta-xilinx-core/lib/oe/maketype.py b/meta-xilinx-core/lib/oe/maketype.py new file mode 100644 index 00000000..7a83bdf6 --- /dev/null +++ b/meta-xilinx-core/lib/oe/maketype.py | |||
| @@ -0,0 +1,107 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | """OpenEmbedded variable typing support | ||
| 7 | |||
| 8 | Types are defined in the metadata by name, using the 'type' flag on a | ||
| 9 | variable. Other flags may be utilized in the construction of the types. See | ||
| 10 | the arguments of the type's factory for details. | ||
| 11 | """ | ||
| 12 | |||
| 13 | import inspect | ||
| 14 | import oe.types as types | ||
| 15 | from collections.abc import Callable | ||
| 16 | |||
# Registry mapping type names to factory callables; consulted by factory().
available_types = {}
| 18 | |||
class MissingFlag(TypeError):
    """Raised when a type's factory needs a flag that the metadata
    did not supply."""

    def __init__(self, flag, type):
        # Remember both pieces of context for the message below.
        self.flag = flag
        self.type = type
        super().__init__()

    def __str__(self):
        return "Type '{0}' requires flag '{1}'".format(self.type, self.flag)
| 29 | |||
def factory(var_type):
    """Return the factory registered for *var_type*.

    Raises TypeError when no type was specified or the type is unknown.
    """
    valid = ', '.join(available_types)
    if var_type is None:
        raise TypeError("No type specified. Valid types: %s" % valid)
    if var_type not in available_types:
        raise TypeError("Invalid type '%s':\n Valid types: %s" %
                        (var_type, valid))
    return available_types[var_type]
| 40 | |||
def create(value, var_type, **flags):
    """Create an object of the specified type, given the specified flags and
    string value."""
    obj = factory(var_type)

    # Every non-optional flag must be present; fail on the first one
    # (in declaration order) that is missing.
    for flag in obj.flags:
        if flag not in flags and flag not in obj.optflags:
            raise MissingFlag(flag, var_type)

    # Forward only the flags the type actually declares.
    objflags = {flag: flags[flag] for flag in obj.flags if flag in flags}
    return obj(value, **objflags)
| 54 | |||
def get_callable_args(obj):
    """Grab all but the first argument of the specified callable, returning
    the list, as well as the set of arguments that have default values."""
    # For classes, inspect the constructor instead of the class object.
    if type(obj) is type:
        obj = obj.__init__

    params = inspect.signature(obj).parameters
    args = list(params)
    defaulted = [p for p in params
                 if params[p].default is not inspect.Parameter.empty]

    flaglist = []
    if args:
        # Drop a leading 'self' from bound-style signatures.
        if len(args) > 1 and args[0] == 'self':
            args = args[1:]
        flaglist.extend(args)

    optional = set()
    if defaulted:
        # Defaults always trail the positional arguments.
        optional |= set(flaglist[-len(defaulted):])
    return flaglist, optional
| 75 | |||
def factory_setup(name, obj):
    """Prepare a factory for use by attaching flags/optflags/name."""
    args, optional = get_callable_args(obj)
    # The first argument receives the value itself; the rest are flags.
    flags = args[1:]
    if flags:
        obj.flags = flags
        obj.optflags = set(optional)
    else:
        obj.flags = obj.optflags = ()

    if not hasattr(obj, 'name'):
        obj.name = name
| 88 | |||
def register(name, factory):
    """Register a type under *name*, given its factory callable.

    The required and optional flags are derived from the factory's
    call signature (see factory_setup)."""
    factory_setup(name, factory)
    available_types[factory.name] = factory
| 96 | |||
| 97 | |||
# Register all our included types
#
# Every public callable exposed by oe.types is treated as a type
# factory and registered under its own attribute name.
for name in dir(types):
    if name.startswith('_'):
        continue

    obj = getattr(types, name)
    if not isinstance(obj, Callable):
        continue

    register(name, obj)
diff --git a/meta-xilinx-core/lib/oe/manifest.py b/meta-xilinx-core/lib/oe/manifest.py new file mode 100644 index 00000000..61f18adc --- /dev/null +++ b/meta-xilinx-core/lib/oe/manifest.py | |||
| @@ -0,0 +1,206 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | from abc import ABCMeta, abstractmethod | ||
| 8 | import os | ||
| 9 | import re | ||
| 10 | import bb | ||
| 11 | |||
class Manifest(object, metaclass=ABCMeta):
    """
    Abstract description of a package manifest. Do not instantiate this
    directly; backend-specific subclasses (PkgManifest) implement the
    create_* methods.
    """

    # Package classification codes used in manifest lines.
    PKG_TYPE_MUST_INSTALL = "mip"
    PKG_TYPE_MULTILIB = "mlp"
    PKG_TYPE_LANGUAGE = "lgp"
    PKG_TYPE_ATTEMPT_ONLY = "aop"

    MANIFEST_TYPE_IMAGE = "image"
    MANIFEST_TYPE_SDK_HOST = "sdk_host"
    MANIFEST_TYPE_SDK_TARGET = "sdk_target"

    # Per manifest type: which bitbake variable feeds which package type.
    var_maps = {
        MANIFEST_TYPE_IMAGE: {
            "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
            "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
            "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
        },
        MANIFEST_TYPE_SDK_HOST: {
            "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        },
        MANIFEST_TYPE_SDK_TARGET: {
            "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        }
    }

    INSTALL_ORDER = [
        PKG_TYPE_LANGUAGE,
        PKG_TYPE_MUST_INSTALL,
        PKG_TYPE_ATTEMPT_ONLY,
        PKG_TYPE_MULTILIB
    ]

    initial_manifest_file_header = \
        "# This file was generated automatically and contains the packages\n" \
        "# passed on to the package manager in order to create the rootfs.\n\n" \
        "# Format:\n" \
        "# <package_type>,<package_name>\n" \
        "# where:\n" \
        "#  <package_type> can be:\n" \
        "#      'mip' = must install package\n" \
        "#      'aop' = attempt only package\n" \
        "#      'mlp' = multilib package\n" \
        "#      'lgp' = language package\n\n"

    def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
        """
        :param d: the bitbake data store
        :param manifest_dir: directory for the manifest files; defaults to
            SDK_DIR for SDK manifests and WORKDIR for image manifests
        :param manifest_type: one of the MANIFEST_TYPE_* constants
        """
        self.d = d
        self.manifest_type = manifest_type

        if manifest_dir is None:
            if manifest_type != self.MANIFEST_TYPE_IMAGE:
                self.manifest_dir = self.d.getVar('SDK_DIR')
            else:
                self.manifest_dir = self.d.getVar('WORKDIR')
        else:
            self.manifest_dir = manifest_dir

        bb.utils.mkdirhier(self.manifest_dir)

        self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
        self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
        self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)

        # packages in the following vars will be split in 'must install' and
        # 'multilib'
        self.vars_to_split = ["PACKAGE_INSTALL",
                              "TOOLCHAIN_HOST_TASK",
                              "TOOLCHAIN_TARGET_TASK"]

    def _create_dummy_initial(self):
        """
        This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
        This will be used for testing until the class is implemented properly!
        """
        image_rootfs = self.d.getVar('IMAGE_ROOTFS')
        pkg_list = dict()
        # Fix: use substring membership instead of find() > 0, which
        # silently missed a match at the very start of the string.
        if "core-image-sato-sdk" in image_rootfs:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-x11-sato-games packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-x11-base " \
                "packagegroup-core-sdk packagegroup-core-tools-debug " \
                "packagegroup-core-boot packagegroup-core-tools-testapps " \
                "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
                "apt packagegroup-core-tools-profile psplash " \
                "packagegroup-core-standalone-sdk-target " \
                "packagegroup-core-ssh-openssh dpkg kernel-dev"
            pkg_list[self.PKG_TYPE_LANGUAGE] = \
                "locale-base-en-us locale-base-en-gb"
        elif "core-image-sato" in image_rootfs:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
                "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-boot"
            # Fix: use the symbolic constant instead of the literal 'lgp'
            # for consistency with the other branches.
            pkg_list[self.PKG_TYPE_LANGUAGE] = \
                "locale-base-en-us locale-base-en-gb"
        elif "core-image-minimal" in image_rootfs:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for pkg_type in pkg_list:
                for pkg in pkg_list[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    @abstractmethod
    def create_initial(self):
        """
        This will create the initial manifest which will be used by Rootfs class to
        generate the rootfs
        """
        pass

    @abstractmethod
    def create_final(self):
        """
        This creates the manifest after everything has been installed.
        """
        pass

    @abstractmethod
    def create_full(self, pm):
        """
        This creates the manifest after the package in initial manifest has been
        dummy installed. It lists all *to be installed* packages. There is no real
        installation, just a test.
        """
        pass

    def parse_initial_manifest(self):
        """
        Parse the initial manifest and return a dict mapping each package
        type code ('mip', 'aop', 'mlp', 'lgp') to its list of package names.
        """
        pkgs = dict()

        # The line pattern is loop-invariant: compile it once instead of
        # re-building the pattern string for every line.
        pattern = re.compile("^(%s|%s|%s|%s),(.*)$" %
                             (self.PKG_TYPE_MUST_INSTALL,
                              self.PKG_TYPE_ATTEMPT_ONLY,
                              self.PKG_TYPE_MULTILIB,
                              self.PKG_TYPE_LANGUAGE))

        with open(self.initial_manifest) as manifest:
            for line in manifest.read().split('\n'):
                # Skip comment lines.
                if line.startswith("#"):
                    continue

                pkg = pattern.match(line)
                if pkg is not None:
                    pkg_type = pkg.group(1)
                    pkg_name = pkg.group(2)
                    pkgs.setdefault(pkg_type, []).append(pkg_name)

        return pkgs

    def parse_full_manifest(self):
        """
        Parse the full manifest and return the list of package names it
        contains. Returns an empty list when the file does not exist.
        """
        installed_pkgs = list()
        if not os.path.exists(self.full_manifest):
            bb.note('full manifest not exist')
            return installed_pkgs

        # NOTE(review): a trailing newline yields an empty final entry,
        # matching the original behaviour; callers appear to cope.
        with open(self.full_manifest, 'r') as manifest:
            for pkg in manifest.read().split('\n'):
                installed_pkgs.append(pkg.strip())

        return installed_pkgs
| 191 | |||
| 192 | |||
| 193 | |||
def create_manifest(d, final_manifest=False, manifest_dir=None,
                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
    """Instantiate the PkgManifest backend matching IMAGE_PKGTYPE and
    write either the final or the initial manifest."""
    import importlib

    backend = 'oe.package_manager.%s.manifest' % d.getVar('IMAGE_PKGTYPE')
    manifest = importlib.import_module(backend).PkgManifest(d, manifest_dir, manifest_type)

    if final_manifest:
        manifest.create_final()
    else:
        manifest.create_initial()
| 203 | |||
| 204 | |||
# No standalone behaviour; the module is only meant to be imported.
if __name__ == "__main__":
    pass
diff --git a/meta-xilinx-core/lib/oe/npm_registry.py b/meta-xilinx-core/lib/oe/npm_registry.py new file mode 100644 index 00000000..d97ced7c --- /dev/null +++ b/meta-xilinx-core/lib/oe/npm_registry.py | |||
| @@ -0,0 +1,175 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
import json
import os
import subprocess

import bb
| 10 | |||
_ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         'abcdefghijklmnopqrstuvwxyz'
                         '0123456789'
                         '_.-~()')

# Sentinel marking manifest keys that may legitimately be absent.
MISSING_OK = object()

REGISTRY = "https://registry.npmjs.org"

# we can not use urllib.parse here because npm expects lowercase
# hex-chars but urllib generates uppercase ones
def uri_quote(s, safe = '/'):
    """Percent-encode *s* with lowercase hex digits, leaving characters
    in _ALWAYS_SAFE or *safe* untouched.

    Fix: non-ASCII characters are now encoded per UTF-8 byte; the
    previous '%%%02x' % ord(c) emitted malformed escapes (more than two
    hex digits) for code points above 0xff.
    """
    safe_set = set(safe)
    out = []
    for c in s:
        if c in _ALWAYS_SAFE or c in safe_set:
            out.append(c)
        else:
            # One two-digit escape per UTF-8 byte.
            out.extend('%%%02x' % b for b in c.encode('utf-8'))
    return ''.join(out)
| 31 | |||
class PackageJson:
    """Read-only accessor over a parsed package.json dictionary."""

    def __init__(self, spec):
        self.__spec = spec

    @property
    def name(self):
        return self.__spec['name']

    @property
    def version(self):
        return self.__spec['version']

    @property
    def empty_manifest(self):
        """Skeleton registry manifest with no versions registered yet."""
        return {
            'name': self.name,
            'description': self.__spec.get('description', ''),
            'versions': {},
        }

    def base_filename(self):
        """Filename-safe encoding of the package name ('@' kept as-is)."""
        return uri_quote(self.name, safe = '@')

    def as_manifest_entry(self, tarball_uri):
        """Build the per-version manifest entry 'npm install' expects."""
        res = {}

        ## NOTE: 'npm install' requires more than basic meta information;
        ## e.g. it takes 'bin' from this manifest entry but not the actual
        ## 'package.json'
        for (key, dflt) in [('name', None),
                            ('description', ""),
                            ('version', None),
                            ('bin', MISSING_OK),
                            ('man', MISSING_OK),
                            ('scripts', MISSING_OK),
                            ('directories', MISSING_OK),
                            ('dependencies', MISSING_OK),
                            ('devDependencies', MISSING_OK),
                            ('optionalDependencies', MISSING_OK),
                            ('license', "unknown")]:
            if key in self.__spec:
                res[key] = self.__spec[key]
            elif dflt is MISSING_OK:
                # Key may legitimately be absent; emit nothing for it.
                pass
            elif dflt is not None:
                res[key] = dflt
            else:
                raise Exception("%s-%s: missing key %s" % (self.name,
                                                           self.version,
                                                           key))

        res['dist'] = {
            'tarball': tarball_uri,
        }

        return res
| 88 | |||
class ManifestImpl:
    """Serialized view of a package's registry manifest (<base>.meta)."""

    def __init__(self, base_fname, spec):
        self.__base = base_fname
        self.__spec = spec

    @property
    def filename(self):
        """Path of the JSON metadata file backing this manifest."""
        return self.__base + ".meta"

    def load(self):
        """Return the stored manifest, or a fresh empty one when the
        metadata file cannot be read yet."""
        try:
            with open(self.filename, "r") as f:
                return json.load(f)
        except IOError:
            return self.__spec.empty_manifest

    def save(self, meta):
        """Persist *meta* to the metadata file as indented JSON."""
        with open(self.filename, "w") as f:
            json.dump(meta, f, indent = 2)
| 110 | |||
class Manifest:
    """Context manager serializing access to a package's manifest.

    Entering the context takes a '<base>.lock' file lock and yields a
    ManifestImpl for the corresponding '<base>.meta' file.
    """

    def __init__(self, base_fname, spec):
        self.__base = base_fname
        self.__spec = spec
        self.__lock_handle = None
        self.__manifest_impl = None

    def __enter__(self):
        # Take the lock first so only one writer touches the .meta file.
        self.__lock_handle = bb.utils.lockfile(self.__base + ".lock")
        self.__manifest_impl = ManifestImpl(self.__base, self.__spec)
        return self.__manifest_impl

    def __exit__(self, exc_type, exc_val, exc_tb):
        bb.utils.unlockfile(self.__lock_handle)
| 125 | |||
class NpmCache:
    """Thin wrapper around the 'oe-npm-cache' helper used to seed an
    npm cache directory."""

    def __init__(self, cache):
        self.__dir = cache

    @property
    def path(self):
        """Location of the cache directory."""
        return self.__dir

    def run(self, type, key, fname):
        """Insert *fname* into the cache as an entry of *type* under *key*."""
        cmd = ['oe-npm-cache', self.__dir, type, key, fname]
        subprocess.run(cmd, check = True)
| 137 | |||
class NpmRegistry:
    """Maintain a local npm registry layout (manifest files plus cacache
    entries) that 'npm install' can consume offline.

    Fixes: relies on 'import os' being added at module level (os.path.join
    was used without it, a NameError at runtime), and drops the stray
    trailing semicolons.
    """

    def __init__(self, path, cache):
        self.__path = path
        self.__cache = NpmCache(cache + '/_cacache')
        bb.utils.mkdirhier(self.__path)
        bb.utils.mkdirhier(self.__cache.path)

    @staticmethod
    ## This function is critical and must match nodejs expectations
    def _meta_uri(spec):
        return REGISTRY + '/' + uri_quote(spec.name, safe = '@')

    @staticmethod
    ## Exact return value does not matter; just make it look like a
    ## usual registry url
    def _tarball_uri(spec):
        return '%s/%s/-/%s-%s.tgz' % (REGISTRY,
                                      uri_quote(spec.name, safe = '@'),
                                      uri_quote(spec.name, safe = '@/'),
                                      spec.version)

    def add_pkg(self, tarball, pkg_json):
        """Register *tarball* (described by the package.json dict
        *pkg_json*) in the local registry and cache."""
        pkg_json = PackageJson(pkg_json)
        base = os.path.join(self.__path, pkg_json.base_filename())

        with Manifest(base, pkg_json) as manifest:
            meta = manifest.load()
            tarball_uri = self._tarball_uri(pkg_json)

            meta['versions'][pkg_json.version] = pkg_json.as_manifest_entry(tarball_uri)

            manifest.save(meta)

            ## Cache entries are a little bit dependent on the nodejs
            ## version; version specific cache implementation must
            ## mitigate differences
            self.__cache.run('meta', self._meta_uri(pkg_json), manifest.filename)
            self.__cache.run('tgz', tarball_uri, tarball)
diff --git a/meta-xilinx-core/lib/oe/overlayfs.py b/meta-xilinx-core/lib/oe/overlayfs.py new file mode 100644 index 00000000..8b88900f --- /dev/null +++ b/meta-xilinx-core/lib/oe/overlayfs.py | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | # This file contains common functions for overlayfs and its QA check | ||
| 7 | |||
# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
def escapeSystemdUnitName(path):
    """Escape *path* like systemd escapes mount unit names: strip the
    leading/trailing slashes, turn inner '/' into '-', and replace '-'
    and '\\' with C-style hex escapes of their byte values.

    Fix: '\\' must map to "\\x5c" (0x5c is the backslash byte); the
    previous "\\x5d" encoded ']' instead.
    """
    escapeMap = {
        '/': '-',
        '-': "\\x2d",
        '\\': "\\x5c"
    }
    return "".join([escapeMap.get(c, c) for c in path.strip('/')])
| 16 | |||
def strForBash(s):
    """Double every backslash so *s* survives bash string interpretation."""
    return '\\\\'.join(s.split('\\'))
| 19 | |||
def allOverlaysUnitName(d):
    """Name of the umbrella service unit grouping all overlay mounts."""
    return "%s-overlays.service" % d.getVar('PN')
| 22 | |||
def mountUnitName(unit):
    """systemd .mount unit file name for the given mount point path."""
    return "%s.mount" % escapeSystemdUnitName(unit)
| 25 | |||
def helperUnitName(unit):
    """Name of the helper service that creates the overlay's upper dir."""
    return "%s-create-upper-dir.service" % escapeSystemdUnitName(unit)
| 28 | |||
def unitFileList(d):
    """Return the systemd unit file names generated for every configured
    overlayfs mount point, plus the umbrella '-overlays.service' unit.

    Calls bb.fatal when the MACHINE configuration lacks the required
    OVERLAYFS_MOUNT_POINT flags.
    """
    fileList = []
    overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")

    if not overlayMountPoints:
        bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")

    # check that we have required mount points set first
    requiredMountPoints = d.getVarFlags('OVERLAYFS_WRITABLE_PATHS')
    for mountPoint in requiredMountPoints:
        if mountPoint not in overlayMountPoints:
            bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)

    for mountPoint in overlayMountPoints:
        mountPointList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
        if not mountPointList:
            # Fix: the message was passed as a separate argument, so the
            # '%s' placeholder was never substituted; format it here.
            bb.debug(1, "No mount points defined for %s flag, don't add to file list" % mountPoint)
            continue
        for path in mountPointList.split():
            fileList.append(mountUnitName(path))
            fileList.append(helperUnitName(path))

    fileList.append(allOverlaysUnitName(d))

    return fileList
| 54 | |||
diff --git a/meta-xilinx-core/lib/oe/package.py b/meta-xilinx-core/lib/oe/package.py new file mode 100644 index 00000000..af0923a6 --- /dev/null +++ b/meta-xilinx-core/lib/oe/package.py | |||
| @@ -0,0 +1,2065 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import errno | ||
| 8 | import fnmatch | ||
| 9 | import itertools | ||
| 10 | import os | ||
| 11 | import shlex | ||
| 12 | import re | ||
| 13 | import glob | ||
| 14 | import stat | ||
| 15 | import mmap | ||
| 16 | import subprocess | ||
| 17 | import shutil | ||
| 18 | |||
| 19 | import oe.cachedpath | ||
| 20 | |||
def runstrip(arg):
    """Strip a single file in place; called from split_and_strip_files.

    *arg* is (file, elftype, strip[, extra_strip_sections]). The elftype
    is a bit pattern (explained in is_elf below):
      4  - executable
      8  - shared library
      16 - kernel module
    Requires a working 'file' (one which works on the target architecture).
    """
    if len(arg) == 3:
        (file, elftype, strip) = arg
        extra_strip_sections = ''
    else:
        (file, elftype, strip, extra_strip_sections) = arg

    newmode = None
    # Temporarily grant read+write when either is missing. Fix: the
    # original tested 'or os.access(file, os.R_OK)', which chmod'ed
    # every readable file even when no permission change was needed.
    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
        origmode = os.stat(file)[stat.ST_MODE]
        newmode = origmode | stat.S_IWRITE | stat.S_IREAD
        os.chmod(file, newmode)

    stripcmd = [strip]
    skip_strip = False
    # kernel module
    if elftype & 16:
        if is_kernel_module_signed(file):
            bb.debug(1, "Skip strip on signed module %s" % file)
            skip_strip = True
        else:
            stripcmd.extend(["--strip-debug", "--remove-section=.comment",
                "--remove-section=.note", "--preserve-dates"])
    # .so and shared library
    elif ".so" in file and elftype & 8:
        stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
    # shared or executable:
    elif elftype & 8 or elftype & 4:
        stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
        if extra_strip_sections != '':
            for section in extra_strip_sections.split():
                stripcmd.extend(["--remove-section=" + section])

    stripcmd.append(file)
    bb.debug(1, "runstrip: %s" % stripcmd)

    if not skip_strip:
        # Output is captured only so strip's stderr ends up in the
        # exception on failure.
        subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)

    # Restore the original permissions if we widened them above.
    if newmode:
        os.chmod(file, origmode)
| 71 | |||
# Detect .ko module by searching for "vermagic=" string
def is_kernel_module(path):
    """Return True when the file at *path* contains a "vermagic=" marker.

    Fixes: open the (binary) file in 'rb' mode instead of text mode, and
    close both the mmap and the file deterministically. Note: mmap of an
    empty file still raises ValueError, as before.
    """
    with open(path, "rb") as f:
        with mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ) as m:
            return m.find(b"vermagic=") >= 0
| 76 | |||
# Detect if .ko module is signed
def is_kernel_module_signed(path):
    """Return True when *path* ends with the 28-byte kernel module
    signature footer containing "Module signature appended".

    Fixes: files shorter than 28 bytes made seek(-28, 2) raise OSError;
    they cannot be signed, so return False. The byte-by-byte chr() join
    is replaced by a direct bytes containment test (same semantics).
    """
    # The signature magic footer ("~Module signature appended~\n") is
    # exactly 28 bytes long.
    if os.path.getsize(path) < 28:
        return False
    with open(path, "rb") as f:
        f.seek(-28, 2)
        module_tail = f.read()
    return b"Module signature appended" in module_tail
| 83 | |||
# Return type (bits):
# 0 - not elf
# 1 - ELF
# 2 - stripped
# 4 - executable
# 8 - shared library
# 16 - kernel module
def is_elf(path):
    """Classify *path* using file(1); returns (path, bitmask as above)."""
    desc = subprocess.check_output(["file", "-b", path],
                                   stderr=subprocess.STDOUT).decode("utf-8")

    exec_type = 0
    if "ELF" in desc:
        exec_type |= 1
        if "not stripped" not in desc:
            exec_type |= 2
        if "executable" in desc:
            exec_type |= 4
        if "shared" in desc:
            exec_type |= 8
        if "relocatable" in desc:
            # Relocatable objects only count as kernel modules when the
            # path looks like an installed .ko carrying a vermagic string.
            if path.endswith(".ko") and path.find("/lib/modules/") != -1 and is_kernel_module(path):
                exec_type |= 16
    return (path, exec_type)
| 107 | |||
def is_static_lib(path):
    """Return True when *path* is a regular '.a' static archive
    (symlinks and golang archives are excluded)."""
    if not path.endswith('.a') or os.path.islink(path):
        return False
    # The magic must include the first slash to avoid
    # matching golang static libraries
    magic = b'!<arch>\x0a/'
    with open(path, 'rb') as fh:
        return fh.read(len(magic)) == magic
| 117 | |||
def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, max_process, qa_already_stripped=False):
    """
    Strip executable code (like executables, shared libraries) _in_place_
    - Based on sysroot_strip in staging.bbclass
    :param dstdir: directory in which to strip files
    :param strip_cmd: Strip command (usually ${STRIP})
    :param libdir: ${libdir} - strip .so files in this directory
    :param base_libdir: ${base_libdir} - strip .so files in this directory
    :param max_process: number of stripping processes started in parallel
    :param qa_already_stripped: Set to True if already-stripped' in ${INSANE_SKIP}
    This is for proper logging and messages only.
    """
    import stat, errno, oe.path, oe.utils

    elffiles = {}
    inodes = {}
    libdir = os.path.abspath(dstdir + os.sep + libdir)
    base_libdir = os.path.abspath(dstdir + os.sep + base_libdir)
    exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    #
    # First lets figure out all of the files we may have to process
    #
    checkelf = []
    inodecache = {}
    for root, dirs, files in os.walk(dstdir):
        for f in files:
            file = os.path.join(root, f)

            try:
                # Resolve symlinks relative to dstdir so links into the
                # image do not escape to the host filesystem.
                ltarget = oe.path.realpath(file, dstdir, False)
                s = os.lstat(ltarget)
            except OSError as e:
                (err, strerror) = e.args
                if err != errno.ENOENT:
                    raise
                # Skip broken symlinks
                continue
            if not s:
                continue
            # Check its an excutable
            if s[stat.ST_MODE] & exec_mask \
                    or ((file.startswith(libdir) or file.startswith(base_libdir)) and ".so" in f) \
                    or file.endswith('.ko'):
                # If it's a symlink, and points to an ELF file, we capture the readlink target
                if os.path.islink(file):
                    continue

                # It's a file (or hardlink), not a link
                # ...but is it ELF, and is it already stripped?
                checkelf.append(file)
                inodecache[file] = s.st_ino
    # Classify all candidates in parallel; is_elf shells out to file(1).
    results = oe.utils.multiprocess_launch_mp(is_elf, checkelf, max_process)
    for (file, elf_file) in results:
        #elf_file = is_elf(file)
        if elf_file & 1:
            # Bit 2 set means file(1) reported it as already stripped.
            if elf_file & 2:
                if qa_already_stripped:
                    bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dstdir):], pn))
                else:
                    bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
                continue

            # Only strip one file per inode; re-link the others to it
            # afterwards so hardlink groups stay consistent.
            if inodecache[file] in inodes:
                os.unlink(file)
                os.link(inodes[inodecache[file]], file)
            else:
                # break hardlinks so that we do not strip the original.
                inodes[inodecache[file]] = file
                bb.utils.break_hardlinks(file)
            elffiles[file] = elf_file

    #
    # Now strip them (in parallel)
    #
    sfiles = []
    for file in elffiles:
        elf_file = int(elffiles[file])
        sfiles.append((file, elf_file, strip_cmd))

    oe.utils.multiprocess_launch_mp(runstrip, sfiles, max_process)
| 198 | |||
| 199 | |||
def file_translate(file):
    """Encode characters that are unsafe in packaging metadata file
    names as @name@ tokens ('@' itself becomes '@at@')."""
    # str.translate performs all substitutions in a single pass; since
    # none of the replacement tokens' characters are themselves mapped
    # except '@' (handled first in the original), the result matches
    # the original chained replace() calls.
    table = str.maketrans({
        "@": "@at@",
        " ": "@space@",
        "\t": "@tab@",
        "[": "@openbrace@",
        "]": "@closebrace@",
        "_": "@underscore@",
    })
    return file.translate(table)
| 208 | |||
def filedeprunner(arg):
    """Run the rpmdeps helper over one package's files and collect its
    per-file provides/requires.

    *arg* is (pkg, pkgfiles, rpmdeps, pkgdest); returns
    (pkg, provides, requires) where both maps go from translated file
    name to a list of dependency strings.
    """
    import re, subprocess, shlex

    (pkg, pkgfiles, rpmdeps, pkgdest) = arg
    provides = {}
    requires = {}

    # rpmdeps output: a numbered file line followed by its 'P'/'R'
    # dependency lines; the version-spec pattern matches '<', '>', '='.
    file_re = re.compile(r'\s+\d+\s(.*)')
    dep_re = re.compile(r'\s+(\S)\s+(.*)')
    r = re.compile(r'[<>=]+\s+\S*')

    def process_deps(pipe, pkg, pkgdest, provides, requires):
        # Parse rpmdeps output line by line, tracking the current file.
        file = None
        for line in pipe.split("\n"):

            m = file_re.match(line)
            if m:
                file = m.group(1)
                file = file.replace(pkgdest + "/" + pkg, "")
                file = file_translate(file)
                continue

            m = dep_re.match(line)
            if not m or not file:
                continue

            type, dep = m.groups()

            if type == 'R':
                i = requires
            elif type == 'P':
                i = provides
            else:
                # Any other marker type is ignored.
                continue

            if dep.startswith("python("):
                continue

            # Ignore all perl(VMS::...) and perl(Mac::...) dependencies. These
            # are typically used conditionally from the Perl code, but are
            # generated as unconditional dependencies.
            if dep.startswith('perl(VMS::') or dep.startswith('perl(Mac::'):
                continue

            # Ignore perl dependencies on .pl files.
            if dep.startswith('perl(') and dep.endswith('.pl)'):
                continue

            # Remove perl versions and perl module versions since they typically
            # do not make sense when used as package versions.
            if dep.startswith('perl') and r.search(dep):
                dep = dep.split()[0]

            # Put parentheses around any version specifications.
            dep = r.sub(r'(\g<0>)',dep)

            if file not in i:
                i[file] = []
            i[file].append(dep)

        return provides, requires

    output = subprocess.check_output(shlex.split(rpmdeps) + pkgfiles, stderr=subprocess.STDOUT).decode("utf-8")
    provides, requires = process_deps(output, pkg, pkgdest, provides, requires)

    return (pkg, provides, requires)
| 275 | |||
| 276 | |||
def read_shlib_providers(d):
    """Collect shared-library provider information from the '.list'
    files found in SHLIBSDIRS.

    Returns a dict: soname -> { library path -> (providing package, version) }.
    """
    import re

    shlib_provider = {}
    shlibs_dirs = d.getVar('SHLIBSDIRS').split()
    list_re = re.compile(r'^(.*)\.list$')
    # Go from least to most specific since the last one found wins
    for dir in reversed(shlibs_dirs):
        bb.debug(2, "Reading shlib providers in %s" % (dir))
        if not os.path.exists(dir):
            continue
        for file in sorted(os.listdir(dir)):
            m = list_re.match(file)
            if not m:
                continue
            dep_pkg = m.group(1)
            try:
                fd = open(os.path.join(dir, file))
            except IOError:
                # During a build unrelated shlib files may be deleted, so
                # handle files disappearing between the listdirs and open.
                continue
            with fd:
                lines = fd.readlines()
            for l in lines:
                s = l.strip().split(":")
                shlib_provider.setdefault(s[0], {})[s[1]] = (dep_pkg, s[2])
    return shlib_provider
| 306 | |||
# We generate a master list of directories to process, we start by
# seeding this list with reasonable defaults, then load from
# the fs-perms.txt files
def fixup_perms(d):
    """Apply fs-perms.txt style permission/ownership rules to PKGD.

    Builds a rule table seeded with 0755 root:root defaults for the
    standard bitbake.conf directory variables, overlays it with rules
    parsed from the files named in FILESYSTEM_PERMS_TABLES (resolved via
    BBPATH), then applies the rules to the package staging tree: link
    rules turn directories into symlinks (moving content aside first),
    directory rules chmod/lchown the directory and optionally everything
    below it.
    """
    import pwd, grp

    cpath = oe.cachedpath.CachedPath()
    dvar = d.getVar('PKGD')

    # init using a string with the same format as a line as documented in
    # the fs-perms.txt file
    # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
    # <path> link <link target>
    #
    # __str__ can be used to print out an entry in the input format
    #
    # if fs_perms_entry.path is None:
    #    an error occurred
    # if fs_perms_entry.link, you can retrieve:
    #    fs_perms_entry.path = path
    #    fs_perms_entry.link = target of link
    # if not fs_perms_entry.link, you can retrieve:
    #    fs_perms_entry.path = path
    #    fs_perms_entry.mode = expected dir mode or None
    #    fs_perms_entry.uid = expected uid or -1
    #    fs_perms_entry.gid = expected gid or -1
    #    fs_perms_entry.walk = 'true' or something else
    #    fs_perms_entry.fmode = expected file mode or None
    #    fs_perms_entry.fuid = expected file uid or -1
    #    fs_perms_entry_fgid = expected file gid or -1
    class fs_perms_entry():
        # One parsed rule line: either a directory rule (8 fields) or a
        # link rule (3 fields). Invalid lines report a QA error and leave
        # path/link set to None so callers can skip the entry.
        def __init__(self, line):
            lsplit = line.split()
            if len(lsplit) == 3 and lsplit[1].lower() == "link":
                self._setlink(lsplit[0], lsplit[2])
            elif len(lsplit) == 8:
                self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
            else:
                msg = "Fixup Perms: invalid config line %s" % line
                oe.qa.handle_error("perm-config", msg, d)
                self.path = None
                self.link = None

        def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
            self.path = os.path.normpath(path)
            self.link = None
            self.mode = self._procmode(mode)
            self.uid = self._procuid(uid)
            self.gid = self._procgid(gid)
            self.walk = walk.lower()
            self.fmode = self._procmode(fmode)
            self.fuid = self._procuid(fuid)
            self.fgid = self._procgid(fgid)

        def _setlink(self, path, link):
            self.path = os.path.normpath(path)
            self.link = link

        def _procmode(self, mode):
            # "-" (or empty) means "do not change the mode"
            if not mode or (mode and mode == "-"):
                return None
            else:
                return int(mode,8)

        # Note uid/gid -1 has special significance in os.lchown
        def _procuid(self, uid):
            # accepts a numeric uid, a user name (looked up via pwd), or
            # "-"/None meaning "do not change"
            if uid is None or uid == "-":
                return -1
            elif uid.isdigit():
                return int(uid)
            else:
                return pwd.getpwnam(uid).pw_uid

        def _procgid(self, gid):
            # accepts a numeric gid, a group name (looked up via grp), or
            # "-"/None meaning "do not change"
            if gid is None or gid == "-":
                return -1
            elif gid.isdigit():
                return int(gid)
            else:
                return grp.getgrnam(gid).gr_gid

        # Use for debugging the entries
        def __str__(self):
            if self.link:
                return "%s link %s" % (self.path, self.link)
            else:
                mode = "-"
                if self.mode:
                    mode = "0%o" % self.mode
                fmode = "-"
                if self.fmode:
                    fmode = "0%o" % self.fmode
                uid = self._mapugid(self.uid)
                gid = self._mapugid(self.gid)
                fuid = self._mapugid(self.fuid)
                fgid = self._mapugid(self.fgid)
                return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)

        def _mapugid(self, id):
            # inverse of _procuid/_procgid for __str__ output
            if id is None or id == -1:
                return "-"
            else:
                return "%d" % id

    # Fix the permission, owner and group of path
    def fix_perms(path, mode, uid, gid, dir):
        if mode and not os.path.islink(path):
            #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
            os.chmod(path, mode)
        # -1 is a special value that means don't change the uid/gid
        # if they are BOTH -1, don't bother to lchown
        if not (uid == -1 and gid == -1):
            #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
            os.lchown(path, uid, gid)

    # Return a list of configuration files based on either the default
    # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
    # paths are resolved via BBPATH
    def get_fs_perms_list(d):
        str = ""
        bbpath = d.getVar('BBPATH')
        fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
        for conf_file in fs_perms_tables.split():
            confpath = bb.utils.which(bbpath, conf_file)
            if confpath:
                str += " %s" % bb.utils.which(bbpath, conf_file)
            else:
                bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
        return str

    # rule tables keyed by normalized path; a path is in at most one of them
    fs_perms_table = {}
    fs_link_table = {}

    # By default all of the standard directories specified in
    # bitbake.conf will get 0755 root:root.
    target_path_vars = [ 'base_prefix',
                'prefix',
                'exec_prefix',
                'base_bindir',
                'base_sbindir',
                'base_libdir',
                'datadir',
                'sysconfdir',
                'servicedir',
                'sharedstatedir',
                'localstatedir',
                'infodir',
                'mandir',
                'docdir',
                'bindir',
                'sbindir',
                'libexecdir',
                'libdir',
                'includedir' ]

    for path in target_path_vars:
        dir = d.getVar(path) or ""
        if dir == "":
            continue
        fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))

    # Now we actually load from the configuration files
    for conf in get_fs_perms_list(d).split():
        if not os.path.exists(conf):
            continue
        with open(conf) as f:
            for line in f:
                if line.startswith('#'):
                    continue
                lsplit = line.split()
                if len(lsplit) == 0:
                    continue
                if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
                    msg = "Fixup perms: %s invalid line: %s" % (conf, line)
                    oe.qa.handle_error("perm-line", msg, d)
                    continue
                entry = fs_perms_entry(d.expand(line))
                if entry and entry.path:
                    # a later rule for the same path replaces an earlier
                    # one, including across the two tables (a link rule
                    # evicts a perms rule for that path and vice versa)
                    if entry.link:
                        fs_link_table[entry.path] = entry
                        if entry.path in fs_perms_table:
                            fs_perms_table.pop(entry.path)
                    else:
                        fs_perms_table[entry.path] = entry
                        if entry.path in fs_link_table:
                            fs_link_table.pop(entry.path)

    # Debug -- list out in-memory table
    #for dir in fs_perms_table:
    #    bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
    #for link in fs_link_table:
    #    bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))

    # We process links first, so we can go back and fixup directory ownership
    # for any newly created directories
    # Process in sorted order so /run gets created before /run/lock, etc.
    for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
        link = entry.link
        dir = entry.path
        origin = dvar + dir
        # only convert real, existing, non-symlink directories
        if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
            continue

        if link[0] == "/":
            target = dvar + link
            ptarget = link
        else:
            # relative link target: resolve against the directory's parent
            target = os.path.join(os.path.dirname(origin), link)
            ptarget = os.path.join(os.path.dirname(dir), link)
        if os.path.exists(target):
            msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
            oe.qa.handle_error("perm-link", msg, d)
            continue

        # Create path to move directory to, move it, and then setup the symlink
        bb.utils.mkdirhier(os.path.dirname(target))
        #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
        bb.utils.rename(origin, target)
        #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
        os.symlink(link, origin)

    for dir in fs_perms_table:
        origin = dvar + dir
        if not (cpath.exists(origin) and cpath.isdir(origin)):
            continue

        fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)

        # walk == 'true' recursively applies the dir rule to subdirs and
        # the file rule (fmode/fuid/fgid) to files beneath this path
        if fs_perms_table[dir].walk == 'true':
            for root, dirs, files in os.walk(origin):
                for dr in dirs:
                    each_dir = os.path.join(root, dr)
                    fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
                for f in files:
                    each_file = os.path.join(root, f)
                    fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
| 543 | |||
# Get a list of files from file vars by searching files under current working directory
# The list contains symlinks, directories and normal files.
def files_from_filevars(filevars):
    """Expand a FILES-style variable list into concrete './'-relative paths.

    Globs each entry (recursively) relative to the current working
    directory and expands directories into their contents.  Returns
    (files, symlink_paths): files holds symlinks, directories and normal
    files; symlink_paths lists the original entries that had to be
    truncated because a parent directory turned out to be a symlink.
    """
    cpath = oe.cachedpath.CachedPath()
    files = []
    for entry in filevars:
        # Normalise every entry to a './'-prefixed relative path.
        if os.path.isabs(entry):
            entry = '.' + entry
        if not entry.startswith("./"):
            entry = './' + entry
        matches = glob.glob(entry, recursive=True)
        if matches and matches != [entry]:
            files += matches
        else:
            files.append(entry)

    symlink_paths = []
    # NOTE: 'files' grows while we iterate it; enumerate() keeps walking
    # into the appended entries, which is how directory contents get
    # expanded recursively.
    for idx, path in enumerate(files):
        # Handle directory symlinks. Truncate path to the lowest level symlink
        parent = ''
        for component in path.split('/')[:-1]:
            parent = os.path.join(parent, component)
            if component == '.':
                continue
            if cpath.islink(parent):
                bb.warn("FILES contains file '%s' which resides under a "
                        "directory symlink. Please fix the recipe and use the "
                        "real path for the file." % path[1:])
                symlink_paths.append(path)
                files[idx] = parent
                path = parent
                break

        if not cpath.islink(path) and cpath.isdir(path):
            files += [os.path.join(path, child) for child in os.listdir(path)]

    return files, symlink_paths
| 585 | |||
def get_conffiles(pkg, d):
    """Return the sorted list of configuration files shipped by *pkg*.

    Called in package_<rpm,ipk,deb>.bbclass to get the correct list of
    configuration files.  Resolves CONFFILES:<pkg> (falling back to
    CONFFILES) relative to the package's staging root under PKGDEST,
    expands globs and directories via files_from_filevars(), and keeps
    only existing regular files (no directories, no symlinks).  Returned
    paths are absolute on the target (leading '.' stripped).
    """
    pkgdest = d.getVar('PKGDEST')
    root = os.path.join(pkgdest, pkg)
    cwd = os.getcwd()
    os.chdir(root)
    try:
        conffiles = d.getVar('CONFFILES:%s' % pkg)
        if conffiles is None:
            conffiles = d.getVar('CONFFILES')
        if conffiles is None:
            conffiles = ""
        conffiles = conffiles.split()
        conf_orig_list = files_from_filevars(conffiles)[0]

        # Keep only normal files: drop directories, symlinks and anything
        # that does not actually exist in the package.
        conf_list = [f for f in conf_orig_list
                     if not os.path.isdir(f)
                     and not os.path.islink(f)
                     and os.path.exists(f)]

        # Remove the leading '.' so the paths are rooted at '/'.
        conf_list = [f[1:] for f in conf_list]
    finally:
        # Always restore the original working directory, even if
        # files_from_filevars() or a filesystem check raises — previously
        # an exception here leaked the chdir to the rest of the task.
        os.chdir(cwd)
    return sorted(conf_list)
| 618 | |||
def legitimize_package_name(s):
    """Make sure package names are legitimate strings.

    Decodes glibc-locale style ``<UXXXX>`` escapes into their unicode
    characters, lowercases the result, and maps characters package
    managers reject onto allowed replacements.
    """

    def _decode_codepoint(match):
        hexcp = match.group(1)
        if hexcp:
            return ('\\u%s' % hexcp).encode('latin-1').decode('unicode_escape')

    # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
    s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', _decode_codepoint, s)

    # Remaining package name validity fixes
    s = s.lower()
    for bad, good in (('_', '-'), ('@', '+'), (',', '+'), ('/', '-')):
        s = s.replace(bad, good)
    return s
| 634 | |||
def split_locales(d):
    """Split locale data out of the main package into per-locale packages.

    Scans each LOCALE_PATHS directory under PKGD for locale
    subdirectories and creates a <LOCALEBASEPN>-locale-<name> package per
    locale, setting FILES/RRECOMMENDS/RPROVIDES/SUMMARY/DESCRIPTION (and
    optionally SECTION) for each, then rewrites PACKAGES.  Does nothing
    when PACKAGE_NO_LOCALE is '1' or no locale directories are found.
    """
    cpath = oe.cachedpath.CachedPath()
    if d.getVar('PACKAGE_NO_LOCALE') == '1':
        bb.debug(1, "package requested not splitting locales")
        return

    packages = (d.getVar('PACKAGES') or "").split()
    dvar = d.getVar('PKGD')
    pn = d.getVar('LOCALEBASEPN')

    # New per-locale packages are inserted where the monolithic
    # <pn>-locale package used to sit, or appended if there was none.
    try:
        locale_index = packages.index(pn + '-locale')
        packages.pop(locale_index)
    except ValueError:
        locale_index = len(packages)

    localepaths = []
    locales = set()
    for localepath in (d.getVar('LOCALE_PATHS') or "").split():
        localedir = dvar + localepath
        if not cpath.isdir(localedir):
            bb.debug(1, 'No locale files in %s' % localepath)
            continue

        localepaths.append(localepath)
        with os.scandir(localedir) as it:
            locales.update(entry.name for entry in it if entry.is_dir())

    if not locales:
        bb.debug(1, "No locale files in this package")
        return

    summary = d.getVar('SUMMARY') or pn
    description = d.getVar('DESCRIPTION') or ""
    locale_section = d.getVar('LOCALE_SECTION')
    mlprefix = d.getVar('MLPREFIX') or ""
    for locale in sorted(locales):
        ln = legitimize_package_name(locale)
        pkg = pn + '-locale-' + ln
        packages.insert(locale_index, pkg)
        locale_index += 1
        files = [os.path.join(localepath, locale) for localepath in localepaths]
        d.setVar('FILES:' + pkg, " ".join(files))
        d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
        d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
        d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, locale))
        d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, locale))
        if locale_section:
            d.setVar('SECTION:' + pkg, locale_section)

    d.setVar('PACKAGES', ' '.join(packages))

    # Disabled by RP 18/06/07
    # Wildcards aren't supported in debian
    # They break with ipkg since glibc-locale* will mean that
    # glibc-localedata-translit* won't install as a dependency
    # for some other package which breaks meta-toolchain
    # Probably breaks since virtual-locale- isn't provided anywhere
    #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
    #rdep.append('%s-locale*' % pn)
    #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
| 701 | |||
def package_debug_vars(d):
    """Return the debug-split path settings for this recipe.

    Selects a dict of path fragments (append/dir/libdir/srcdir and their
    static-library counterparts) based on PACKAGE_DEBUG_SPLIT_STYLE.
    Unrecognised or unset styles fall back to the original OE-core
    ".debug" layout.
    """
    style = d.getVar('PACKAGE_DEBUG_SPLIT_STYLE')

    if style == 'debug-file-directory':
        # Single debug-file-directory style debug info
        return {
            "append": ".debug",
            "staticappend": "",
            "dir": "",
            "staticdir": "",
            "libdir": "/usr/lib/debug",
            "staticlibdir": "/usr/lib/debug-static",
            "srcdir": "/usr/src/debug",
        }

    if style == 'debug-without-src':
        # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
        return {
            "append": "",
            "staticappend": "",
            "dir": "/.debug",
            "staticdir": "/.debug-static",
            "libdir": "",
            "staticlibdir": "",
            "srcdir": "",
        }

    if style == 'debug-with-srcpkg':
        # ".debug" style layout, sources kept for a separate source package
        return {
            "append": "",
            "staticappend": "",
            "dir": "/.debug",
            "staticdir": "/.debug-static",
            "libdir": "",
            "staticlibdir": "",
            "srcdir": "/usr/src/debug",
        }

    # Default: original OE-core, a.k.a. ".debug", style debug info
    return {
        "append": "",
        "staticappend": "",
        "dir": "/.debug",
        "staticdir": "/.debug-static",
        "libdir": "",
        "staticlibdir": "",
        "srcdir": "/usr/src/debug",
    }
| 749 | |||
| 750 | |||
def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
    """Extract the debug source paths from `dwarfsrcfiles` output.

    Source entries are the tab-indented lines; the first whitespace-
    separated token of each is a path.  Paths are normalised and
    deduplicated (insertion order preserved); a dict keys view is
    returned.
    """
    entries = (os.path.normpath(line.split()[0])
               for line in dwarfsrcfiles_output.splitlines()
               if line.startswith("\t"))
    return dict.fromkeys(entries).keys()
| 759 | |||
def source_info(file, d, fatal=True):
    """Run `dwarfsrcfiles` on *file* and return its debug source list.

    Exit code 255 means the file could not be fully parsed to get the
    debug file list, which is tolerated; any other non-zero exit is fatal
    unless *fatal* is False, in which case it is only logged.
    """
    cmd = ["dwarfsrcfiles", file]
    try:
        output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
        retval = 0
    except subprocess.CalledProcessError as err:
        output, retval = err.output, err.returncode

    # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
    if retval not in (0, 255):
        msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
        if fatal:
            bb.fatal(msg)
        bb.note(msg)

    return list(parse_debugsources_from_dwarfsrcfiles_output(output))
| 779 | |||
def splitdebuginfo(file, dvar, dv, d):
    """Split a single ELF file into a stripped binary plus a debug file.

    The debug information is copied into the dv["libdir"]/dv["dir"]
    location and the two files are linked to reference each other via a
    .gnu_debuglink section.

    Arguments:
        file: absolute path of the binary inside the package root
        dvar: the package root (PKGD) prefix to strip from *file*
        dv:   debug path settings from package_debug_vars()
        d:    the bitbake datastore

    Returns (file, sources) where sources is the list of debug source
    files referenced by the binary (empty when sources are not split
    out, or when the file is a signed kernel module).
    """
    src = file[len(dvar):]
    dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
    debugfile = dvar + dest
    sources = []

    # Signed kernel modules must not be modified: adding or stripping
    # sections would invalidate the signature.
    if file.endswith(".ko") and file.find("/lib/modules/") != -1:
        if oe.package.is_kernel_module_signed(file):
            bb.debug(1, "Skip strip on signed module %s" % file)
            return (file, sources)

    # Split the file...
    bb.utils.mkdirhier(os.path.dirname(debugfile))
    # Only store off the hard link reference if we successfully split!

    objcopy = d.getVar("OBJCOPY")

    # Temporarily make the file readable and writable so objcopy can work
    # on it; the original mode is restored afterwards.
    # Fix: the readability test was inverted ("or os.access(file, os.R_OK)"
    # instead of "or not ..."), which made this chmod dance run for
    # virtually every file instead of only unreadable/unwritable ones.
    newmode = None
    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
        origmode = os.stat(file)[stat.ST_MODE]
        newmode = origmode | stat.S_IWRITE | stat.S_IREAD
        os.chmod(file, newmode)

    # We need to extract the debug src information here...
    if dv["srcdir"]:
        sources = source_info(file, d)

    subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)

    # Set the debuglink to have the view of the file path on the target
    subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)

    if newmode:
        os.chmod(file, origmode)

    return (file, sources)
| 826 | |||
def splitstaticdebuginfo(file, dvar, dv, d):
    """Preserve debug info for a static library by copying it aside.

    Unlike splitdebuginfo(), there is no way to split a static library
    into two components.  To get similar results the unmodified archive
    (still containing the debug symbols) is copied into the
    dv["staticlibdir"]/dv["staticdir"] location; the library in its usual
    location is stripped (preserving symbols) elsewhere.

    Returns (file, sources) where sources is the list of debug source
    files referenced by the archive (empty when sources are not split
    out).
    """
    src = file[len(dvar):]
    dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
    debugfile = dvar + dest
    sources = []

    # Copy the file...
    bb.utils.mkdirhier(os.path.dirname(debugfile))

    # Temporarily make the file readable and writable; the original mode
    # is restored afterwards.
    # Fix: the readability test was inverted ("or os.access(file, os.R_OK)"
    # instead of "or not ..."), which made this chmod dance run for
    # virtually every file instead of only unreadable/unwritable ones.
    newmode = None
    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
        origmode = os.stat(file)[stat.ST_MODE]
        newmode = origmode | stat.S_IWRITE | stat.S_IREAD
        os.chmod(file, newmode)

    # We need to extract the debug src information here...
    if dv["srcdir"]:
        sources = source_info(file, d)

    # Copy the unmodified item to the debug directory
    shutil.copy2(file, debugfile)

    if newmode:
        os.chmod(file, origmode)

    return (file, sources)
| 866 | |||
def inject_minidebuginfo(file, dvar, dv, d):
    # Extract just the symbols from debuginfo into minidebuginfo,
    # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
    # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
    #
    # Arguments mirror splitdebuginfo(): file is the binary's absolute
    # path under the package root, dvar is the package root prefix,
    # dv the settings from package_debug_vars(), d the datastore.
    # Returns None in all cases; the binary is modified in place.

    readelf = d.getVar('READELF')
    nm = d.getVar('NM')
    objcopy = d.getVar('OBJCOPY')

    # scratch area for the intermediate .minidebug/.symlist/.xz files
    minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')

    src = file[len(dvar):]
    # must match the debugfile path computed by splitdebuginfo()
    dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
    debugfile = dvar + dest
    minidebugfile = minidebuginfodir + src + '.minidebug'
    bb.utils.mkdirhier(os.path.dirname(minidebugfile))

    # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
    # so skip it.
    if not os.path.exists(debugfile):
        bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
        return

    # minidebuginfo does not make sense to apply to ELF objects other than
    # executables and shared libraries, skip applying the minidebuginfo
    # generation for objects like kernel modules.
    for line in subprocess.check_output([readelf, '-h', debugfile], universal_newlines=True).splitlines():
        if not line.strip().startswith("Type:"):
            continue
        elftype = line.split(":")[1].strip()
        if not any(elftype.startswith(i) for i in ["EXEC", "DYN"]):
            bb.debug(1, 'ELF file {} is not executable/shared, skipping minidebuginfo injection'.format(file))
            return
        break

    # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
    # We will exclude all of these from minidebuginfo to save space.
    remove_section_names = []
    for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
        # strip the leading "  [ 1]" section index to allow splitting on space
        if ']' not in line:
            continue
        fields = line[line.index(']') + 1:].split()
        if len(fields) < 7:
            continue
        name = fields[0]
        type = fields[1]
        flags = fields[6]
        # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
        if name.startswith('.debug_'):
            continue
        # 'A' in the flags column marks an allocated section (readelf -S)
        if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
            remove_section_names.append(name)

    # List dynamic symbols in the binary. We can exclude these from minidebuginfo
    # because they are always present in the binary.
    dynsyms = set()
    for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
        dynsyms.add(line.split()[0])

    # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
    # These are the ones we want to keep in minidebuginfo.
    keep_symbols_file = minidebugfile + '.symlist'
    found_any_symbols = False
    with open(keep_symbols_file, 'w') as f:
        # nm --format=sysv emits '|'-separated columns; field 3 is the type
        for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
            fields = line.split('|')
            if len(fields) < 7:
                continue
            name = fields[0].strip()
            type = fields[3].strip()
            if type == 'FUNC' and name not in dynsyms:
                f.write('{}\n'.format(name))
                found_any_symbols = True

    if not found_any_symbols:
        bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
        return

    # start from a clean slate in case of a rerun
    bb.utils.remove(minidebugfile)
    bb.utils.remove(minidebugfile + '.xz')

    # -S strips all debug sections; the explicit removals and the kept
    # symbol list shrink the result to just what gdb needs
    subprocess.check_call([objcopy, '-S'] +
                          ['--remove-section={}'.format(s) for s in remove_section_names] +
                          ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])

    subprocess.check_call(['xz', '--keep', minidebugfile])

    # inject the xz-compressed result into the stripped binary itself
    subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
| 956 | |||
def copydebugsources(debugsrcdir, sources, d):
    # The debug src information written out to sourcefile is further processed
    # and copied to the destination here.
    #
    # debugsrcdir: target-relative destination (e.g. dv["srcdir"]);
    # sources: list of source paths from source_info();
    # d: the bitbake datastore.  Copies the referenced source files into
    # PKGD + debugsrcdir via cpio, honouring -fdebug-prefix-map remappings.

    cpath = oe.cachedpath.CachedPath()

    if debugsrcdir and sources:
        sourcefile = d.expand("${WORKDIR}/debugsources.list")
        bb.utils.remove(sourcefile)

        # filenames are null-separated - this is an artefact of the previous use
        # of rpm's debugedit, which was writing them out that way, and the code elsewhere
        # is still assuming that.
        debuglistoutput = '\0'.join(sources) + '\0'
        with open(sourcefile, 'a') as sf:
            sf.write(debuglistoutput)

        dvar = d.getVar('PKGD')
        strip = d.getVar("STRIP")
        objcopy = d.getVar("OBJCOPY")
        workdir = d.getVar("WORKDIR")
        sdir = d.getVar("S")
        cflags = d.expand("${CFLAGS}")

        # Map of original source prefix -> remapped (on-target) prefix,
        # built from -fdebug-prefix-map=OLD=NEW entries in CFLAGS.
        prefixmap = {}
        for flag in cflags.split():
            if not flag.startswith("-fdebug-prefix-map"):
                continue
            if "recipe-sysroot" in flag:
                continue
            flag = flag.split("=")
            prefixmap[flag[1]] = flag[2]

        # Record which path components of debugsrcdir we had to create so
        # they can be removed again at the end if they stayed empty.
        nosuchdir = []
        basepath = dvar
        for p in debugsrcdir.split("/"):
            basepath = basepath + "/" + p
            if not cpath.exists(basepath):
                nosuchdir.append(basepath)
        bb.utils.mkdirhier(basepath)
        cpath.updatecache(basepath)

        for pmap in prefixmap:
            # Ignore files from the recipe sysroots (target and native)
            cmd = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | " % sourcefile
            # We need to ignore files that are not actually ours
            # we do this by only paying attention to items from this package
            cmd += "fgrep -zw '%s' | " % prefixmap[pmap]
            # Remove prefix in the source paths
            cmd += "sed 's#%s/##g' | " % (prefixmap[pmap])
            cmd += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)" % (pmap, dvar, prefixmap[pmap])

            try:
                subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError:
                # Can "fail" if internal headers/transient sources are attempted
                pass
            # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
            # Work around this by manually finding and copying any symbolic links that made it through.
            cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
                (dvar, prefixmap[pmap], dvar, prefixmap[pmap], pmap, dvar, prefixmap[pmap])
            subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

            # debugsources.list may be polluted from the host if we used externalsrc,
            # cpio uses copy-pass and may have just created a directory structure
            # matching the one from the host, if thats the case move those files to
            # debugsrcdir to avoid host contamination.
            # Empty dir structure will be deleted in the next step.

            # Same check as above for externalsrc
            if workdir not in sdir:
                if os.path.exists(dvar + debugsrcdir + sdir):
                    cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar,debugsrcdir)
                    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

        # The copy by cpio may have resulted in some empty directories! Remove these
        cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

        # Also remove debugsrcdir if its empty
        # (deepest-first, so parents become removable as children go)
        for p in nosuchdir[::-1]:
            if os.path.exists(p) and not os.listdir(p):
                os.rmdir(p)
| 1040 | |||
| 1041 | |||
def process_split_and_strip_files(d):
    """Split debug info out of binaries in PKGD and strip them.

    Walks the PKGD tree once to classify files (ELF executables/libraries,
    symlinks to ELF files, static archives), splits their debug info into
    the configured debug directory (honouring INHIBIT_PACKAGE_DEBUG_SPLIT),
    reconstructs hardlink/symlink relationships for the split debug files,
    copies referenced sources, and finally strips the originals (honouring
    INHIBIT_PACKAGE_STRIP). Sets PKGDEBUGSOURCES as a side effect.
    """
    cpath = oe.cachedpath.CachedPath()

    dvar = d.getVar('PKGD')
    pn = d.getVar('PN')
    hostos = d.getVar('HOST_OS')

    # All file paths below are absolute inside PKGD; work from there and
    # restore the caller's cwd at the end.
    oldcwd = os.getcwd()
    os.chdir(dvar)

    dv = package_debug_vars(d)

    #
    # First lets figure out all of the files we may have to process ... do this only once!
    #
    elffiles = {}
    symlinks = {}
    staticlibs = []
    inodes = {}
    libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
    baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
    skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
    if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
            d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
        checkelf = {}
        checkelflinks = {}
        checkstatic = {}
        for root, dirs, files in cpath.walk(dvar):
            for f in files:
                file = os.path.join(root, f)

                # Skip debug files
                if dv["append"] and file.endswith(dv["append"]):
                    continue
                if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
                    continue

                if file in skipfiles:
                    continue

                try:
                    ltarget = cpath.realpath(file, dvar, False)
                    s = cpath.lstat(ltarget)
                except OSError as e:
                    (err, strerror) = e.args
                    if err != errno.ENOENT:
                        raise
                    # Skip broken symlinks
                    continue
                if not s:
                    continue

                if oe.package.is_static_lib(file):
                    # Use a reference of device ID and inode number to identify files
                    file_reference = "%d_%d" % (s.st_dev, s.st_ino)
                    checkstatic[file] = (file, file_reference)
                    continue

                # Check its an executable
                if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
                        or (s[stat.ST_MODE] & stat.S_IXOTH) \
                        or ((file.startswith(libdir) or file.startswith(baselibdir)) \
                        and (".so" in f or ".node" in f)) \
                        or (f.startswith('vmlinux') or ".ko" in f):

                    if cpath.islink(file):
                        checkelflinks[file] = ltarget
                        continue
                    # Use a reference of device ID and inode number to identify files
                    file_reference = "%d_%d" % (s.st_dev, s.st_ino)
                    checkelf[file] = (file, file_reference)

        # Resolve which symlink targets are actually ELF files, in parallel.
        results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
        results_map = {}
        for (ltarget, elf_file) in results:
            results_map[ltarget] = elf_file
        for file in checkelflinks:
            ltarget = checkelflinks[file]
            # If it's a symlink, and points to an ELF file, we capture the readlink target
            if results_map[ltarget]:
                target = os.readlink(file)
                #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
                symlinks[file] = target

        results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)

        # Sort results by file path. This ensures that the files are always
        # processed in the same order, which is important to make sure builds
        # are reproducible when dealing with hardlinks
        results.sort(key=lambda x: x[0])

        for (file, elf_file) in results:
            # It's a file (or hardlink), not a link
            # ...but is it ELF, and is it already stripped?
            # elf_file is a bitmask: bit 0 = is ELF, bit 1 = already stripped
            # (see the & 1 / & 2 tests below).
            if elf_file & 1:
                if elf_file & 2:
                    if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
                        bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
                    else:
                        msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
                        oe.qa.handle_error("already-stripped", msg, d)
                    continue

                # At this point we have an unstripped elf file. We need to:
                #  a) Make sure any file we strip is not hardlinked to anything else outside this tree
                #  b) Only strip any hardlinked file once (no races)
                #  c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks

                # Use a reference of device ID and inode number to identify files
                file_reference = checkelf[file][1]
                if file_reference in inodes:
                    # Already saw this inode: re-link this path to the first
                    # occurrence so only one physical copy is processed.
                    os.unlink(file)
                    os.link(inodes[file_reference][0], file)
                    inodes[file_reference].append(file)
                else:
                    inodes[file_reference] = [file]
                    # break hardlink
                    bb.utils.break_hardlinks(file)
                elffiles[file] = elf_file
                # Modified the file so clear the cache
                cpath.updatecache(file)

        # Do the same hardlink processing as above, but for static libraries
        results = list(checkstatic.keys())

        # As above, sort the results.
        results.sort(key=lambda x: x[0])

        for file in results:
            # Use a reference of device ID and inode number to identify files
            file_reference = checkstatic[file][1]
            if file_reference in inodes:
                os.unlink(file)
                os.link(inodes[file_reference][0], file)
                inodes[file_reference].append(file)
            else:
                inodes[file_reference] = [file]
                # break hardlink
                bb.utils.break_hardlinks(file)
            staticlibs.append(file)
            # Modified the file so clear the cache
            cpath.updatecache(file)

    def strip_pkgd_prefix(f):
        # Return f with the leading PKGD directory removed, if present.
        nonlocal dvar

        if f.startswith(dvar):
            return f[len(dvar):]

        return f

    #
    # First lets process debug splitting
    #
    if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
        results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))

        if dv["srcdir"] and not hostos.startswith("mingw"):
            if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
                results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
            else:
                for file in staticlibs:
                    results.append( (file,source_info(file, d)) )

        d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})

        sources = set()
        for r in results:
            sources.update(r[1])

        # Hardlink our debug symbols to the other hardlink copies
        for ref in inodes:
            if len(inodes[ref]) == 1:
                continue

            target = inodes[ref][0][len(dvar):]
            for file in inodes[ref][1:]:
                src = file[len(dvar):]
                dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
                fpath = dvar + dest
                ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
                if os.access(ftarget, os.R_OK):
                    bb.utils.mkdirhier(os.path.dirname(fpath))
                    # Only one hardlink of separated debug info file in each directory
                    if not os.access(fpath, os.R_OK):
                        #bb.note("Link %s -> %s" % (fpath, ftarget))
                        os.link(ftarget, fpath)
                elif (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
                    # No regular debug file found; try the static-split layout.
                    deststatic = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(file) + dv["staticappend"]
                    fpath = dvar + deststatic
                    ftarget = dvar + dv["staticlibdir"] + os.path.dirname(target) + dv["staticdir"] + "/" + os.path.basename(target) + dv["staticappend"]
                    if os.access(ftarget, os.R_OK):
                        bb.utils.mkdirhier(os.path.dirname(fpath))
                        # Only one hardlink of separated debug info file in each directory
                        if not os.access(fpath, os.R_OK):
                            #bb.note("Link %s -> %s" % (fpath, ftarget))
                            os.link(ftarget, fpath)
                else:
                    bb.note("Unable to find inode link target %s" % (target))

        # Create symlinks for all cases we were able to split symbols
        for file in symlinks:
            src = file[len(dvar):]
            dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
            fpath = dvar + dest
            # Skip it if the target doesn't exist
            try:
                s = os.stat(fpath)
            except OSError as e:
                (err, strerror) = e.args
                if err != errno.ENOENT:
                    raise
                continue

            ltarget = symlinks[file]
            lpath = os.path.dirname(ltarget)
            lbase = os.path.basename(ltarget)
            ftarget = ""
            if lpath and lpath != ".":
                ftarget += lpath + dv["dir"] + "/"
            ftarget += lbase + dv["append"]
            if lpath.startswith(".."):
                ftarget = os.path.join("..", ftarget)
            bb.utils.mkdirhier(os.path.dirname(fpath))
            #bb.note("Symlink %s -> %s" % (fpath, ftarget))
            os.symlink(ftarget, fpath)

        # Process the dv["srcdir"] if requested...
        # This copies and places the referenced sources for later debugging...
        copydebugsources(dv["srcdir"], sources, d)
    #
    # End of debug splitting
    #

    #
    # Now lets go back over things and strip them
    #
    if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
        strip = d.getVar("STRIP")
        sfiles = []
        for file in elffiles:
            elf_file = int(elffiles[file])
            #bb.note("Strip %s" % file)
            sfiles.append((file, elf_file, strip))
        if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
            for f in staticlibs:
                # 16 marks the entry as a static archive for runstrip
                # (distinct from the is_elf bitmask values) — see
                # oe.package.runstrip for the interpretation.
                sfiles.append((f, 16, strip))

        oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)

    # Build "minidebuginfo" and reinject it back into the stripped binaries
    if bb.utils.contains('DISTRO_FEATURES', 'minidebuginfo', True, False, d):
        oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
                                     extraargs=(dvar, dv, d))

    #
    # End of strip
    #
    os.chdir(oldcwd)
| 1301 | |||
| 1302 | |||
def populate_packages(d):
    """Split the installed tree in PKGD into per-package trees under PKGDEST.

    Orders PACKAGES (-src packages first, -dbg last when autodebug is on),
    sanity checks it for duplicates, then for each package copies/hardlinks
    the files matched by FILES:<pkg> into PKGDEST/<pkg>. Packages with
    incompatible licenses are dropped from PACKAGES, and any installed file
    not shipped by some package raises the installed-vs-shipped QA error.

    Fixes vs. previous revision:
    - "//" in FILES was reported and claimed to be fixed, but the result of
      str.replace() was discarded; it is now reassigned so the fix-up
      actually happens.
    - ``seen`` is a set instead of a list: membership tests run once per
      installed file, which was accidentally O(n^2).
    """
    cpath = oe.cachedpath.CachedPath()

    workdir = d.getVar('WORKDIR')
    outdir = d.getVar('DEPLOY_DIR')
    dvar = d.getVar('PKGD')
    packages = d.getVar('PACKAGES').split()
    pn = d.getVar('PN')

    bb.utils.mkdirhier(outdir)
    os.chdir(dvar)

    autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)

    split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')

    # If debug-with-srcpkg mode is enabled then add the source package if it
    # doesn't exist and add the source file contents to the source package.
    if split_source_package:
        src_package_name = ('%s-src' % d.getVar('PN'))
        if not src_package_name in packages:
            packages.append(src_package_name)
        d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')

    # Sanity check PACKAGES for duplicates
    # Sanity should be moved to sanity.bbclass once we have the infrastructure
    package_dict = {}

    for i, pkg in enumerate(packages):
        if pkg in package_dict:
            msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
            oe.qa.handle_error("packages-list", msg, d)
        # Ensure the source package gets the chance to pick up the source files
        # before the debug package by ordering it first in PACKAGES. Whether it
        # actually picks up any source files is controlled by
        # PACKAGE_DEBUG_SPLIT_STYLE.
        elif pkg.endswith("-src"):
            package_dict[pkg] = (10, i)
        elif autodebug and pkg.endswith("-dbg"):
            package_dict[pkg] = (30, i)
        else:
            package_dict[pkg] = (50, i)
    # Stable sort: priority bucket first, original position second.
    packages = sorted(package_dict.keys(), key=package_dict.get)
    d.setVar('PACKAGES', ' '.join(packages))
    pkgdest = d.getVar('PKGDEST')

    # Paths (relative, "./"-prefixed) already claimed by some package.
    seen = set()

    # os.mkdir masks the permissions with umask so we have to unset it first
    oldumask = os.umask(0)

    # Collect all debug-split artefacts once; they are appended to every
    # autodebug -dbg package below.
    debug = []
    for root, dirs, files in cpath.walk(dvar):
        dir = root[len(dvar):]
        if not dir:
            dir = os.sep
        for f in (files + dirs):
            path = "." + os.path.join(dir, f)
            if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
                debug.append(path)

    for pkg in packages:
        root = os.path.join(pkgdest, pkg)
        bb.utils.mkdirhier(root)

        filesvar = d.getVar('FILES:%s' % pkg) or ""
        if "//" in filesvar:
            msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
            oe.qa.handle_error("files-invalid", msg, d)
            # str.replace returns a new string; reassign so the advertised
            # fix-up actually takes effect (previously the result was
            # silently discarded).
            filesvar = filesvar.replace("//", "/")

        origfiles = filesvar.split()
        files, symlink_paths = oe.package.files_from_filevars(origfiles)

        if autodebug and pkg.endswith("-dbg"):
            files.extend(debug)

        for file in files:
            if (not cpath.islink(file)) and (not cpath.exists(file)):
                continue
            if file in seen:
                continue
            seen.add(file)

            def mkdir(src, dest, p):
                # Create dest/p mirroring the mode/ownership of src/p.
                src = os.path.join(src, p)
                dest = os.path.join(dest, p)
                fstat = cpath.stat(src)
                os.mkdir(dest)
                os.chmod(dest, fstat.st_mode)
                os.chown(dest, fstat.st_uid, fstat.st_gid)
                seen.add(p)
                cpath.updatecache(dest)

            def mkdir_recurse(src, dest, paths):
                # Recreate every missing directory component of 'paths'
                # under dest, copying metadata from src.
                if cpath.exists(dest + '/' + paths):
                    return
                while paths.startswith("./"):
                    paths = paths[2:]
                p = "."
                for c in paths.split("/"):
                    p = os.path.join(p, c)
                    if not cpath.exists(os.path.join(dest, p)):
                        mkdir(src, dest, p)

            if cpath.isdir(file) and not cpath.islink(file):
                mkdir_recurse(dvar, root, file)
                continue

            mkdir_recurse(dvar, root, os.path.dirname(file))
            fpath = os.path.join(root,file)
            # Regular files are hardlinked (cheap); symlinks are copied.
            if not cpath.islink(file):
                os.link(file, fpath)
                continue
            ret = bb.utils.copyfile(file, fpath)
            if ret is False or ret == 0:
                bb.fatal("File population failed")

        # Check if symlink paths exist
        for file in symlink_paths:
            if not os.path.exists(os.path.join(root,file)):
                bb.fatal("File '%s' cannot be packaged into '%s' because its "
                         "parent directory structure does not exist. One of "
                         "its parent directories is a symlink whose target "
                         "directory is not included in the package." %
                         (file, pkg))

    os.umask(oldumask)
    os.chdir(workdir)

    # Handle excluding packages with incompatible licenses
    package_list = []
    for pkg in packages:
        licenses = d.getVar('_exclude_incompatible-' + pkg)
        if licenses:
            msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
            oe.qa.handle_error("incompatible-license", msg, d)
        else:
            package_list.append(pkg)
    d.setVar('PACKAGES', ' '.join(package_list))

    # Anything installed in PKGD but never claimed by a package is a QA error.
    unshipped = []
    for root, dirs, files in cpath.walk(dvar):
        dir = root[len(dvar):]
        if not dir:
            dir = os.sep
        for f in (files + dirs):
            path = os.path.join(dir, f)
            if ('.' + path) not in seen:
                unshipped.append(path)

    if unshipped != []:
        msg = pn + ": Files/directories were installed but not shipped in any package:"
        if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
            bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
        else:
            for f in unshipped:
                msg = msg + "\n  " + f
            msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
            msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
            oe.qa.handle_error("installed-vs-shipped", msg, d)
| 1465 | |||
def process_fixsymlinks(pkgfiles, d):
    """Turn cross-package symlinks into runtime dependencies.

    For each package, resolve every shipped path; a symlink whose resolved
    target is not itself shipped by that package is "dangling". If the
    target is shipped by another package, add an RDEPENDS on it; if nobody
    ships it, just note the dangling symlink.

    pkgfiles maps package name -> list of absolute paths under PKGDEST/<pkg>.
    """
    cpath = oe.cachedpath.CachedPath()
    pkgdest = d.getVar('PKGDEST')
    packages = d.getVar("PACKAGES", False).split()

    # Per-package: shipped paths (relative to the package root) and the
    # relative resolved targets that do not exist inside the package.
    dangling_links = {}
    pkg_files = {}
    for pkg in packages:
        dangling_links[pkg] = []
        pkg_files[pkg] = []
        inst_root = os.path.join(pkgdest, pkg)
        for path in pkgfiles[pkg]:
            rpath = path[len(inst_root):]
            pkg_files[pkg].append(rpath)
            rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
            if not cpath.lexists(rtarget):
                dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))

    newrdepends = {}
    for pkg in dangling_links:
        for l in dangling_links[pkg]:
            found = False
            bb.debug(1, "%s contains dangling link %s" % (pkg, l))
            for p in packages:
                if l in pkg_files[p]:
                    found = True
                    bb.debug(1, "target found in %s" % p)
                    # A self-dependency would be pointless; stop at the
                    # first providing package either way.
                    if p == pkg:
                        break
                    if pkg not in newrdepends:
                        newrdepends[pkg] = []
                    newrdepends[pkg].append(p)
                    break
            # Idiomatic truthiness test (was: found == False).
            if not found:
                bb.note("%s contains dangling symlink to %s" % (pkg, l))

    # Merge the discovered dependencies into each package's RDEPENDS,
    # preserving any existing versioned entries.
    for pkg in newrdepends:
        rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
        for p in newrdepends[pkg]:
            if p not in rdepends:
                rdepends[p] = []
        d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
| 1508 | |||
def process_filedeps(pkgfiles, d):
    """
    Collect perfile run-time dependency metadata
    Output:
    FILERPROVIDESFLIST:pkg - list of all files w/ deps
    FILERPROVIDES:filepath:pkg - per file dep

    FILERDEPENDSFLIST:pkg - list of all files w/ deps
    FILERDEPENDS:filepath:pkg - per file dep
    """
    if d.getVar('SKIP_FILEDEPS') == '1':
        return

    pkgdest = d.getVar('PKGDEST')
    packages = d.getVar('PACKAGES')
    rpmdeps = d.getVar('RPMDEPS')

    # Package name patterns that never carry interesting file-level deps.
    skip_suffixes = ('-dbg', '-doc', '-src')
    skip_infixes = ('-locale-', '-localedata-', '-gconv-', '-charmap-')

    # Fan each package's file list out into batches of at most 100 files so
    # the dependency scanner can be run in parallel.
    worklist = []
    for pkg in packages.split():
        if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
            continue
        if pkg.endswith(skip_suffixes) or pkg.startswith('kernel-module-') \
                or any(marker in pkg for marker in skip_infixes):
            continue
        allfiles = pkgfiles[pkg]
        for start in range(0, len(allfiles), 100):
            worklist.append((pkg, allfiles[start:start + 100], rpmdeps, pkgdest))

    processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, worklist, d)

    provides_files = {}
    requires_files = {}

    # Record the per-file provides/requires and remember which files each
    # package contributed, so the summary lists can be written afterwards.
    for (pkg, provides, requires) in processed:
        plist = provides_files.setdefault(pkg, [])
        rlist = requires_files.setdefault(pkg, [])

        for file in sorted(provides):
            plist.append(file)
            d.appendVar("FILERPROVIDES:" + file + ":" + pkg, " " + " ".join(provides[file]))

        for file in sorted(requires):
            rlist.append(file)
            d.appendVar("FILERDEPENDS:" + file + ":" + pkg, " " + " ".join(requires[file]))

    for pkg, flist in requires_files.items():
        d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(flist)))
    for pkg, flist in provides_files.items():
        d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(flist)))
| 1565 | |||
| 1566 | def process_shlibs(pkgfiles, d): | ||
| 1567 | cpath = oe.cachedpath.CachedPath() | ||
| 1568 | |||
| 1569 | exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False) | ||
| 1570 | if exclude_shlibs: | ||
| 1571 | bb.note("not generating shlibs") | ||
| 1572 | return | ||
| 1573 | |||
| 1574 | lib_re = re.compile(r"^.*\.so") | ||
| 1575 | libdir_re = re.compile(r".*/%s$" % d.getVar('baselib')) | ||
| 1576 | |||
| 1577 | packages = d.getVar('PACKAGES') | ||
| 1578 | |||
| 1579 | shlib_pkgs = [] | ||
| 1580 | exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS") | ||
| 1581 | if exclusion_list: | ||
| 1582 | for pkg in packages.split(): | ||
| 1583 | if pkg not in exclusion_list.split(): | ||
| 1584 | shlib_pkgs.append(pkg) | ||
| 1585 | else: | ||
| 1586 | bb.note("not generating shlibs for %s" % pkg) | ||
| 1587 | else: | ||
| 1588 | shlib_pkgs = packages.split() | ||
| 1589 | |||
| 1590 | hostos = d.getVar('HOST_OS') | ||
| 1591 | |||
| 1592 | workdir = d.getVar('WORKDIR') | ||
| 1593 | |||
| 1594 | ver = d.getVar('PKGV') | ||
| 1595 | if not ver: | ||
| 1596 | msg = "PKGV not defined" | ||
| 1597 | oe.qa.handle_error("pkgv-undefined", msg, d) | ||
| 1598 | return | ||
| 1599 | |||
| 1600 | pkgdest = d.getVar('PKGDEST') | ||
| 1601 | |||
| 1602 | shlibswork_dir = d.getVar('SHLIBSWORKDIR') | ||
| 1603 | |||
| 1604 | def linux_so(file, pkg, pkgver, d): | ||
| 1605 | needs_ldconfig = False | ||
| 1606 | needed = set() | ||
| 1607 | sonames = set() | ||
| 1608 | renames = [] | ||
| 1609 | ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '') | ||
| 1610 | cmd = d.getVar('OBJDUMP') + " -p " + shlex.quote(file) + " 2>/dev/null" | ||
| 1611 | fd = os.popen(cmd) | ||
| 1612 | lines = fd.readlines() | ||
| 1613 | fd.close() | ||
| 1614 | rpath = tuple() | ||
| 1615 | for l in lines: | ||
| 1616 | m = re.match(r"\s+RPATH\s+([^\s]*)", l) | ||
| 1617 | if m: | ||
| 1618 | rpaths = m.group(1).replace("$ORIGIN", ldir).split(":") | ||
| 1619 | rpath = tuple(map(os.path.normpath, rpaths)) | ||
| 1620 | for l in lines: | ||
| 1621 | m = re.match(r"\s+NEEDED\s+([^\s]*)", l) | ||
| 1622 | if m: | ||
| 1623 | dep = m.group(1) | ||
| 1624 | if dep not in needed: | ||
| 1625 | needed.add((dep, file, rpath)) | ||
| 1626 | m = re.match(r"\s+SONAME\s+([^\s]*)", l) | ||
| 1627 | if m: | ||
| 1628 | this_soname = m.group(1) | ||
| 1629 | prov = (this_soname, ldir, pkgver) | ||
| 1630 | if not prov in sonames: | ||
| 1631 | # if library is private (only used by package) then do not build shlib for it | ||
| 1632 | if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0: | ||
| 1633 | sonames.add(prov) | ||
| 1634 | if libdir_re.match(os.path.dirname(file)): | ||
| 1635 | needs_ldconfig = True | ||
| 1636 | if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname): | ||
| 1637 | renames.append((file, os.path.join(os.path.dirname(file), this_soname))) | ||
| 1638 | return (needs_ldconfig, needed, sonames, renames) | ||
| 1639 | |||
| 1640 | def darwin_so(file, needed, sonames, renames, pkgver): | ||
| 1641 | if not os.path.exists(file): | ||
| 1642 | return | ||
| 1643 | ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '') | ||
| 1644 | |||
| 1645 | def get_combinations(base): | ||
| 1646 | # | ||
| 1647 | # Given a base library name, find all combinations of this split by "." and "-" | ||
| 1648 | # | ||
| 1649 | combos = [] | ||
| 1650 | options = base.split(".") | ||
| 1651 | for i in range(1, len(options) + 1): | ||
| 1652 | combos.append(".".join(options[0:i])) | ||
| 1653 | options = base.split("-") | ||
| 1654 | for i in range(1, len(options) + 1): | ||
| 1655 | combos.append("-".join(options[0:i])) | ||
| 1656 | return combos | ||
| 1657 | |||
| 1658 | if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'): | ||
| 1659 | # Drop suffix | ||
| 1660 | name = os.path.basename(file).rsplit(".",1)[0] | ||
| 1661 | # Find all combinations | ||
| 1662 | combos = get_combinations(name) | ||
| 1663 | for combo in combos: | ||
| 1664 | if not combo in sonames: | ||
| 1665 | prov = (combo, ldir, pkgver) | ||
| 1666 | sonames.add(prov) | ||
| 1667 | if file.endswith('.dylib') or file.endswith('.so'): | ||
| 1668 | rpath = [] | ||
| 1669 | p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) | ||
| 1670 | out, err = p.communicate() | ||
| 1671 | # If returned successfully, process stdout for results | ||
| 1672 | if p.returncode == 0: | ||
| 1673 | for l in out.split("\n"): | ||
| 1674 | l = l.strip() | ||
| 1675 | if l.startswith('path '): | ||
| 1676 | rpath.append(l.split()[1]) | ||
| 1677 | |||
| 1678 | p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) | ||
| 1679 | out, err = p.communicate() | ||
| 1680 | # If returned successfully, process stdout for results | ||
| 1681 | if p.returncode == 0: | ||
| 1682 | for l in out.split("\n"): | ||
| 1683 | l = l.strip() | ||
| 1684 | if not l or l.endswith(":"): | ||
| 1685 | continue | ||
| 1686 | if "is not an object file" in l: | ||
| 1687 | continue | ||
| 1688 | name = os.path.basename(l.split()[0]).rsplit(".", 1)[0] | ||
| 1689 | if name and name not in needed[pkg]: | ||
| 1690 | needed[pkg].add((name, file, tuple())) | ||
| 1691 | |||
| 1692 | def mingw_dll(file, needed, sonames, renames, pkgver): | ||
| 1693 | if not os.path.exists(file): | ||
| 1694 | return | ||
| 1695 | |||
| 1696 | if file.endswith(".dll"): | ||
| 1697 | # assume all dlls are shared objects provided by the package | ||
| 1698 | sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver)) | ||
| 1699 | |||
| 1700 | if (file.endswith(".dll") or file.endswith(".exe")): | ||
| 1701 | # use objdump to search for "DLL Name: .*\.dll" | ||
| 1702 | p = subprocess.Popen([d.expand("${OBJDUMP}"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE) | ||
| 1703 | out, err = p.communicate() | ||
| 1704 | # process the output, grabbing all .dll names | ||
| 1705 | if p.returncode == 0: | ||
| 1706 | for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE): | ||
| 1707 | dllname = m.group(1) | ||
| 1708 | if dllname: | ||
| 1709 | needed[pkg].add((dllname, file, tuple())) | ||
| 1710 | |||
| 1711 | if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1": | ||
| 1712 | snap_symlinks = True | ||
| 1713 | else: | ||
| 1714 | snap_symlinks = False | ||
| 1715 | |||
| 1716 | needed = {} | ||
| 1717 | |||
| 1718 | shlib_provider = oe.package.read_shlib_providers(d) | ||
| 1719 | |||
| 1720 | for pkg in shlib_pkgs: | ||
| 1721 | private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or "" | ||
| 1722 | private_libs = private_libs.split() | ||
| 1723 | needs_ldconfig = False | ||
| 1724 | bb.debug(2, "calculating shlib provides for %s" % pkg) | ||
| 1725 | |||
| 1726 | pkgver = d.getVar('PKGV:' + pkg) | ||
| 1727 | if not pkgver: | ||
| 1728 | pkgver = d.getVar('PV_' + pkg) | ||
| 1729 | if not pkgver: | ||
| 1730 | pkgver = ver | ||
| 1731 | |||
| 1732 | needed[pkg] = set() | ||
| 1733 | sonames = set() | ||
| 1734 | renames = [] | ||
| 1735 | linuxlist = [] | ||
| 1736 | for file in pkgfiles[pkg]: | ||
| 1737 | soname = None | ||
| 1738 | if cpath.islink(file): | ||
| 1739 | continue | ||
| 1740 | if hostos.startswith("darwin"): | ||
| 1741 | darwin_so(file, needed, sonames, renames, pkgver) | ||
| 1742 | elif hostos.startswith("mingw"): | ||
| 1743 | mingw_dll(file, needed, sonames, renames, pkgver) | ||
| 1744 | elif os.access(file, os.X_OK) or lib_re.match(file): | ||
| 1745 | linuxlist.append(file) | ||
| 1746 | |||
| 1747 | if linuxlist: | ||
| 1748 | results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d)) | ||
| 1749 | for r in results: | ||
| 1750 | ldconfig = r[0] | ||
| 1751 | needed[pkg] |= r[1] | ||
| 1752 | sonames |= r[2] | ||
| 1753 | renames.extend(r[3]) | ||
| 1754 | needs_ldconfig = needs_ldconfig or ldconfig | ||
| 1755 | |||
| 1756 | for (old, new) in renames: | ||
| 1757 | bb.note("Renaming %s to %s" % (old, new)) | ||
| 1758 | bb.utils.rename(old, new) | ||
| 1759 | pkgfiles[pkg].remove(old) | ||
| 1760 | |||
| 1761 | shlibs_file = os.path.join(shlibswork_dir, pkg + ".list") | ||
| 1762 | if len(sonames): | ||
| 1763 | with open(shlibs_file, 'w') as fd: | ||
| 1764 | for s in sorted(sonames): | ||
| 1765 | if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]: | ||
| 1766 | (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]] | ||
| 1767 | if old_pkg != pkg: | ||
| 1768 | bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver)) | ||
| 1769 | bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0])) | ||
| 1770 | fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n') | ||
| 1771 | if s[0] not in shlib_provider: | ||
| 1772 | shlib_provider[s[0]] = {} | ||
| 1773 | shlib_provider[s[0]][s[1]] = (pkg, pkgver) | ||
| 1774 | if needs_ldconfig: | ||
| 1775 | bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg) | ||
| 1776 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 1777 | if not postinst: | ||
| 1778 | postinst = '#!/bin/sh\n' | ||
| 1779 | postinst += d.getVar('ldconfig_postinst_fragment') | ||
| 1780 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 1781 | bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames)) | ||
| 1782 | |||
| 1783 | assumed_libs = d.getVar('ASSUME_SHLIBS') | ||
| 1784 | if assumed_libs: | ||
| 1785 | libdir = d.getVar("libdir") | ||
| 1786 | for e in assumed_libs.split(): | ||
| 1787 | l, dep_pkg = e.split(":") | ||
| 1788 | lib_ver = None | ||
| 1789 | dep_pkg = dep_pkg.rsplit("_", 1) | ||
| 1790 | if len(dep_pkg) == 2: | ||
| 1791 | lib_ver = dep_pkg[1] | ||
| 1792 | dep_pkg = dep_pkg[0] | ||
| 1793 | if l not in shlib_provider: | ||
| 1794 | shlib_provider[l] = {} | ||
| 1795 | shlib_provider[l][libdir] = (dep_pkg, lib_ver) | ||
| 1796 | |||
| 1797 | libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')] | ||
| 1798 | |||
| 1799 | for pkg in shlib_pkgs: | ||
| 1800 | bb.debug(2, "calculating shlib requirements for %s" % pkg) | ||
| 1801 | |||
| 1802 | private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or "" | ||
| 1803 | private_libs = private_libs.split() | ||
| 1804 | |||
| 1805 | deps = list() | ||
| 1806 | for n in needed[pkg]: | ||
| 1807 | # if n is in private libraries, don't try to search provider for it | ||
| 1808 | # this could cause problem in case some abc.bb provides private | ||
| 1809 | # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1 | ||
| 1810 | # but skipping it is still better alternative than providing own | ||
| 1811 | # version and then adding runtime dependency for the same system library | ||
| 1812 | if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0: | ||
| 1813 | bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0])) | ||
| 1814 | continue | ||
| 1815 | if n[0] in shlib_provider.keys(): | ||
| 1816 | shlib_provider_map = shlib_provider[n[0]] | ||
| 1817 | matches = set() | ||
| 1818 | for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath): | ||
| 1819 | if p in shlib_provider_map: | ||
| 1820 | matches.add(p) | ||
| 1821 | if len(matches) > 1: | ||
| 1822 | matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches]) | ||
| 1823 | bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1])) | ||
| 1824 | elif len(matches) == 1: | ||
| 1825 | (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()] | ||
| 1826 | |||
| 1827 | bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1])) | ||
| 1828 | |||
| 1829 | if dep_pkg == pkg: | ||
| 1830 | continue | ||
| 1831 | |||
| 1832 | if ver_needed: | ||
| 1833 | dep = "%s (>= %s)" % (dep_pkg, ver_needed) | ||
| 1834 | else: | ||
| 1835 | dep = dep_pkg | ||
| 1836 | if not dep in deps: | ||
| 1837 | deps.append(dep) | ||
| 1838 | continue | ||
| 1839 | bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1])) | ||
| 1840 | |||
| 1841 | deps_file = os.path.join(pkgdest, pkg + ".shlibdeps") | ||
| 1842 | if os.path.exists(deps_file): | ||
| 1843 | os.remove(deps_file) | ||
| 1844 | if deps: | ||
| 1845 | with open(deps_file, 'w') as fd: | ||
| 1846 | for dep in sorted(deps): | ||
| 1847 | fd.write(dep + '\n') | ||
| 1848 | |||
def process_pkgconfig(pkgfiles, d):
    """Record pkg-config provides/requires for every package.

    For each package, writes a <pkg>.pclist file into SHLIBSWORKDIR
    naming the pkg-config modules (.pc files) it ships, then resolves
    each package's 'Requires:' entries against all known .pclist files
    and writes the resulting package dependencies to <pkg>.pcdeps in
    PKGDEST.
    """
    packages = d.getVar('PACKAGES')
    workdir = d.getVar('WORKDIR')
    pkgdest = d.getVar('PKGDEST')

    shlibs_dirs = d.getVar('SHLIBSDIRS').split()
    shlibswork_dir = d.getVar('SHLIBSWORKDIR')

    # Recognise .pc files, and within them "name=value" variable lines
    # and "Field: value" keyword lines
    pc_re = re.compile(r'(.*)\.pc$')
    var_re = re.compile(r'(.*)=(.*)')
    field_re = re.compile(r'(.*): (.*)')

    # pkg -> list of pkg-config module names provided / required
    pkgconfig_provided = {}
    pkgconfig_needed = {}
    for pkg in packages.split():
        pkgconfig_provided[pkg] = []
        pkgconfig_needed[pkg] = []
        for file in sorted(pkgfiles[pkg]):
            m = pc_re.match(file)
            if m:
                # Fresh datastore so ${var} references expand using only
                # the variables defined inside this .pc file
                pd = bb.data.init()
                name = m.group(1)
                pkgconfig_provided[pkg].append(os.path.basename(name))
                if not os.access(file, os.R_OK):
                    continue
                with open(file, 'r') as f:
                    lines = f.readlines()
                for l in lines:
                    m = field_re.match(l)
                    if m:
                        hdr = m.group(1)
                        exp = pd.expand(m.group(2))
                        if hdr == 'Requires':
                            # Requires entries may be comma or space separated
                            pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
                        continue
                    m = var_re.match(l)
                    if m:
                        name = m.group(1)
                        val = m.group(2)
                        pd.setVar(name, pd.expand(val))

    # Publish this recipe's provided modules for other recipes to consume
    for pkg in packages.split():
        pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
        if pkgconfig_provided[pkg] != []:
            with open(pkgs_file, 'w') as f:
                for p in sorted(pkgconfig_provided[pkg]):
                    f.write('%s\n' % p)

    # Go from least to most specific since the last one found wins
    for dir in reversed(shlibs_dirs):
        if not os.path.exists(dir):
            continue
        for file in sorted(os.listdir(dir)):
            m = re.match(r'^(.*)\.pclist$', file)
            if m:
                pkg = m.group(1)
                with open(os.path.join(dir, file)) as fd:
                    lines = fd.readlines()
                pkgconfig_provided[pkg] = []
                for l in lines:
                    pkgconfig_provided[pkg].append(l.rstrip())

    # Resolve each needed module to the providing package and emit .pcdeps
    for pkg in packages.split():
        deps = []
        for n in pkgconfig_needed[pkg]:
            found = False
            for k in pkgconfig_provided.keys():
                if n in pkgconfig_provided[k]:
                    if k != pkg and not (k in deps):
                        deps.append(k)
                    found = True
            if found == False:
                bb.note("couldn't find pkgconfig module '%s' in any package" % n)
        deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
        if len(deps):
            with open(deps_file, 'w') as fd:
                for dep in deps:
                    fd.write(dep + '\n')
| 1927 | |||
def read_libdep_files(d):
    """Read the per-package library dependency files from PKGDEST.

    Collects the .shlibdeps, .pcdeps and .clilibdeps files written by the
    shlib/pkgconfig processing and returns a dict mapping each package in
    PACKAGES to a dict of {dependency: version-constraints}.  The first
    file that records a given dependency wins.
    """
    pkglibdeps = {}
    packages = d.getVar('PACKAGES').split()
    for pkg in packages:
        pkglibdeps[pkg] = {}
        for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
            depsfile = d.expand("${PKGDEST}/" + pkg + extension)
            if os.access(depsfile, os.R_OK):
                with open(depsfile) as fd:
                    lines = fd.readlines()
                for l in lines:
                    # Bug fix: the original called l.rstrip() and discarded
                    # the result (strings are immutable), leaving the
                    # trailing newline on the line
                    l = l.rstrip()
                    deps = bb.utils.explode_dep_versions2(l)
                    for dep in deps:
                        if not dep in pkglibdeps[pkg]:
                            pkglibdeps[pkg][dep] = deps[dep]
    return pkglibdeps
| 1945 | |||
def process_depchains(pkgfiles, d):
    """
    For a given set of prefix and postfix modifiers, make those packages
    RRECOMMENDS on the corresponding packages for its RDEPENDS.

    Example: If package A depends upon package B, and A's .bb emits an
    A-dev package, this would make A-dev Recommends: B-dev.

    If only one of a given suffix is specified, it will take the RRECOMMENDS
    based on the RDEPENDS of *all* other packages. If more than one of a given
    suffix is specified, its will only use the RDEPENDS of the single parent
    package.
    """

    packages = d.getVar('PACKAGES')
    postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
    prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()

    def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
        # Add RRECOMMENDS on the suffixed counterpart of each build-time
        # dependency, skipping native/cross/virtual entries

        #bb.note('depends for %s is %s' % (base, depends))
        rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")

        for depend in sorted(depends):
            if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
                #bb.note("Skipping %s" % depend)
                continue
            # Strip an existing -dev/-dbg suffix before applying ours
            if depend.endswith('-dev'):
                depend = depend[:-4]
            if depend.endswith('-dbg'):
                depend = depend[:-4]
            pkgname = getname(depend, suffix)
            #bb.note("Adding %s for %s" % (pkgname, depend))
            if pkgname not in rreclist and pkgname != pkg:
                rreclist[pkgname] = []

        #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
        d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))

    def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
        # As above, but driven by runtime dependencies (RDEPENDS)

        #bb.note('rdepends for %s is %s' % (base, rdepends))
        rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")

        for depend in sorted(rdepends):
            if depend.find('virtual-locale-') != -1:
                #bb.note("Skipping %s" % depend)
                continue
            if depend.endswith('-dev'):
                depend = depend[:-4]
            if depend.endswith('-dbg'):
                depend = depend[:-4]
            pkgname = getname(depend, suffix)
            #bb.note("Adding %s for %s" % (pkgname, depend))
            if pkgname not in rreclist and pkgname != pkg:
                rreclist[pkgname] = []

        #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
        d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))

    def add_dep(list, dep):
        # Append while preserving order, without duplicates
        if dep not in list:
            list.append(dep)

    # Build-time dependencies of the recipe
    depends = []
    for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
        add_dep(depends, dep)

    # Runtime dependencies across all packages of the recipe
    rdepends = []
    for pkg in packages.split():
        for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
            add_dep(rdepends, dep)

    #bb.note('rdepends is %s' % rdepends)

    def post_getname(name, suffix):
        return '%s%s' % (name, suffix)
    def pre_getname(name, suffix):
        return '%s%s' % (suffix, name)

    # Map each suffix/prefix to the packages of this recipe that carry it,
    # with the stripped base name and the matching name-builder function
    pkgs = {}
    for pkg in packages.split():
        for postfix in postfixes:
            if pkg.endswith(postfix):
                if not postfix in pkgs:
                    pkgs[postfix] = {}
                pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)

        for prefix in prefixes:
            if pkg.startswith(prefix):
                if not prefix in pkgs:
                    pkgs[prefix] = {}
                # NOTE(review): this removes len(prefix) chars from the *end*
                # of pkg; pkg[len(prefix):] looks like the intent -- confirm
                # against upstream before changing (DEPCHAIN_PRE is rarely set)
                pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)

    if "-dbg" in pkgs:
        pkglibdeps = read_libdep_files(d)
        pkglibdeplist = []
        for pkg in pkglibdeps:
            for k in pkglibdeps[pkg]:
                add_dep(pkglibdeplist, k)
        # -dbg packages use library-derived deps unless the recipe is a
        # packagegroup or DEPCHAIN_DBGDEFAULTDEPS is set
        dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))

    for suffix in pkgs:
        for pkg in pkgs[suffix]:
            if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
                continue
            (base, func) = pkgs[suffix][pkg]
            if suffix == "-dev":
                pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
            elif suffix == "-dbg":
                if not dbgdefaultdeps:
                    pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
                    continue
            # Single package with this modifier: recommend against the
            # recipe-wide RDEPENDS; otherwise only the parent package's
            if len(pkgs[suffix]) == 1:
                pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
            else:
                rdeps = []
                for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
                    add_dep(rdeps, dep)
                pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
diff --git a/meta-xilinx-core/lib/oe/packagedata.py b/meta-xilinx-core/lib/oe/packagedata.py new file mode 100644 index 00000000..2d1d6dde --- /dev/null +++ b/meta-xilinx-core/lib/oe/packagedata.py | |||
| @@ -0,0 +1,366 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import codecs | ||
| 8 | import os | ||
| 9 | import json | ||
| 10 | import bb.compress.zstd | ||
| 11 | import oe.path | ||
| 12 | |||
| 13 | from glob import glob | ||
| 14 | |||
def packaged(pkg, d):
    """Return True if a '.packaged' marker exists for *pkg* in PKGDATA_DIR."""
    marker = get_subpkgedata_fn(pkg, d) + '.packaged'
    return os.access(marker, os.R_OK)
| 17 | |||
def read_pkgdatafile(fn):
    """Parse pkgdata file *fn* into a dict of variable name -> value.

    Each line of the form 'NAME: value' becomes one entry; values are
    unescaped with the unicode_escape codec.  An unreadable or missing
    file yields an empty dict.
    """
    import re

    def decode(str):
        c = codecs.getdecoder("unicode_escape")
        return c(str)[0]

    pkgdata = {}
    if os.access(fn, os.R_OK):
        line_re = re.compile(r"(^.+?):\s+(.*)")
        with open(fn, 'r') as f:
            for line in f:
                match = line_re.match(line)
                if match:
                    pkgdata[match.group(1)] = decode(match.group(2))
    return pkgdata
| 36 | |||
def get_subpkgedata_fn(pkg, d):
    """Return the path of the runtime pkgdata file for *pkg*."""
    return d.expand('${PKGDATA_DIR}/runtime/' + pkg)
| 39 | |||
def has_subpkgdata(pkg, d):
    """Return True if readable runtime pkgdata exists for *pkg*."""
    fn = get_subpkgedata_fn(pkg, d)
    return os.access(fn, os.R_OK)
| 42 | |||
def read_subpkgdata(pkg, d):
    """Parse and return the runtime pkgdata for *pkg* as a dict."""
    fn = get_subpkgedata_fn(pkg, d)
    return read_pkgdatafile(fn)
| 45 | |||
def has_pkgdata(pn, d):
    """Return True if readable recipe-level pkgdata exists for recipe *pn*."""
    return os.access(d.expand('${PKGDATA_DIR}/' + pn), os.R_OK)
| 49 | |||
def read_pkgdata(pn, d):
    """Parse and return the recipe-level pkgdata for recipe *pn* as a dict."""
    return read_pkgdatafile(d.expand('${PKGDATA_DIR}/' + pn))
| 53 | |||
| 54 | # | ||
| 55 | # Collapse FOO:pkg variables into FOO | ||
| 56 | # | ||
def read_subpkgdata_dict(pkg, d):
    """Return the runtime pkgdata for *pkg* with FOO:pkg keys collapsed to FOO."""
    flattened = {}
    raw = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
    suffix = ":" + pkg
    for key in raw:
        stripped = key.replace(suffix, "")
        # Skip a bare FOO entry when a more specific FOO:pkg also exists
        if stripped == key and key + suffix in raw:
            continue
        flattened[stripped] = raw[key]
    return flattened
| 66 | |||
def read_subpkgdata_extended(pkg, d):
    """Return the extended (JSON) pkgdata for *pkg*, or None if absent."""
    import json
    import bb.compress.zstd

    fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg)
    try:
        threads = int(d.getVar("BB_NUMBER_THREADS"))
        with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=threads) as f:
            return json.load(f)
    except FileNotFoundError:
        return None
| 78 | |||
def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""
    pkgdatadir = d.getVar("PKGDATA_DIR")

    try:
        entries = os.listdir(pkgdatadir)
    except OSError:
        bb.warn("No files in %s?" % pkgdatadir)
        entries = []

    mapping = {}
    # Recipe-level pkgdata files are the plain files directly in PKGDATA_DIR
    for pn in (e for e in entries if not os.path.isdir(os.path.join(pkgdatadir, e))):
        try:
            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
        except OSError:
            continue

        for pkg in (pkgdata.get("PACKAGES") or "").split():
            mapping[pkg] = pn

    return mapping
| 102 | |||
def pkgmap(d):
    """Return a dictionary mapping package to recipe name.

    The mapping is computed once and cached in the metadata under
    the __pkgmap_data variable.
    """
    cached = d.getVar("__pkgmap_data", False)
    if cached is None:
        cached = _pkgmap(d)
        d.setVar("__pkgmap_data", cached)
    return cached
| 113 | |||
def recipename(pkg, d):
    """Return the recipe name for binary package *pkg* (None if unknown)."""
    mapping = pkgmap(d)
    return mapping.get(pkg)
| 118 | |||
def foreach_runtime_provider_pkgdata(d, rdep, include_rdep=False):
    """Yield (provider, pkgdata) for every package that rprovides *rdep*.

    When include_rdep is True, *rdep* itself is also considered a
    candidate provider.  Providers are yielded in sorted order.
    """
    pkgdata_dir = d.getVar("PKGDATA_DIR")
    candidates = set()
    try:
        candidates.update(os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdep)))
    except OSError:
        # Nothing registered as rproviding rdep
        pass

    if include_rdep:
        candidates.add(rdep)

    for provider in sorted(candidates):
        yield provider, read_subpkgdata(provider, d)
| 133 | |||
def get_package_mapping(pkg, basepkg, d, depversions=None):
    """Return the final (possibly renamed) package name for *pkg*.

    Looks up PKG:<pkg> in the pkgdata written at packaging time; returns
    the rewritten name when a rename applies, or *pkg* unchanged when no
    rename exists or when applying the rename would be wrong (allarch
    multilib handling, or the renamed package rprovides its old name).
    """
    import oe.packagedata

    data = oe.packagedata.read_subpkgdata(pkg, d)
    key = "PKG:%s" % pkg

    if key in data:
        # allarch packagegroups must not reference dynamically renamed packages
        if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
            bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
        # Have to avoid undoing the write_extra_pkgs(global_variants...)
        if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
            and data[key] == basepkg:
            return pkg
        # depversions == [] means an unversioned dependency
        if depversions == []:
            # Avoid returning a mapping if the renamed package rprovides its original name
            rprovkey = "RPROVIDES:%s" % pkg
            if rprovkey in data:
                if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
                    bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
                    return pkg
        # Do map to rewritten package name
        return data[key]

    return pkg
| 158 | |||
def get_package_additional_metadata(pkg_type, d):
    """Return extra package metadata configured for *pkg_type*.

    Checks PACKAGE_ADD_METADATA_<TYPE> first, then the generic
    PACKAGE_ADD_METADATA; returns None when neither variable is set.
    """
    base_key = "PACKAGE_ADD_METADATA"
    specific_key = "%s_%s" % (base_key, pkg_type.upper())
    for key in (specific_key, base_key):
        if d.getVar(key, False) is None:
            continue
        d.setVarFlag(key, "type", "list")
        if d.getVarFlag(key, "separator") is None:
            d.setVarFlag(key, "separator", "\\n")
        fields = [field.strip() for field in oe.data.typed_value(key, d)]
        return "\n".join(fields).strip()
| 169 | |||
def runtime_mapping_rename(varname, pkg, d):
    """Rewrite the dependency list in *varname* to use renamed package names."""
    #bb.note("%s before: %s" % (varname, d.getVar(varname)))

    remapped = {}
    deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
    for depend, depversions in deps.items():
        new_depend = get_package_mapping(depend, pkg, d, depversions)
        if new_depend != depend:
            bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
        remapped[new_depend] = depversions

    d.setVar(varname, bb.utils.join_deps(remapped, commasep=False))

    #bb.note("%s after: %s" % (varname, d.getVar(varname)))
| 184 | |||
def emit_pkgdata(pkgfiles, d):
    """Write the pkgdata files for this recipe into PKGDESTWORK.

    Emits the recipe-level package list, a runtime pkgdata file per
    package (variables, file lists, sizes), the extended JSON file-info
    blob, the runtime-rprovides/runtime-reverse symlink farms used for
    lookups, and a '.packaged' stamp for each non-empty (or ALLOW_EMPTY)
    package.  Also merges deferred on-target postinsts and prepends
    'set -e' to all scriptlets as a side effect.
    """
    def process_postinst_on_target(pkg, mlprefix):
        # Fold pkg_postinst_ontarget into pkg_postinst, wrapped so that it
        # is deferred to first boot when run during image construction
        pkgval = d.getVar('PKG:%s' % pkg)
        if pkgval is None:
            pkgval = pkg

        defer_fragment = """
if [ -n "$D" ]; then
    $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
    exit 0
fi
""" % (pkgval, mlprefix)

        postinst = d.getVar('pkg_postinst:%s' % pkg)
        postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)

        if postinst_ontarget:
            bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
            if not postinst:
                postinst = '#!/bin/sh\n'
            postinst += defer_fragment
            postinst += postinst_ontarget
            d.setVar('pkg_postinst:%s' % pkg, postinst)

    def add_set_e_to_scriptlets(pkg):
        # Prepend 'set -e' so scriptlet failures are fatal, keeping any
        # shebang on the first line
        for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
            scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
            if scriptlet:
                scriptlet_split = scriptlet.split('\n')
                if scriptlet_split[0].startswith("#!"):
                    scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
                else:
                    scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
            d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)

    def write_if_exists(f, pkg, var):
        # Write 'VAR:pkg: value' (preferred) or 'VAR: value' if only the
        # unsuffixed variable is set; values are escape-encoded so they
        # survive the line-oriented pkgdata format
        def encode(str):
            import codecs
            c = codecs.getencoder("unicode_escape")
            return c(str)[0].decode("latin1")

        val = d.getVar('%s:%s' % (var, pkg))
        if val:
            f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
            return val
        val = d.getVar('%s' % (var))
        if val:
            f.write('%s: %s\n' % (var, encode(val)))
        return val

    def write_extra_pkgs(variants, pn, packages, pkgdatadir):
        # Emit recipe-level pkgdata for each multilib variant
        for variant in variants:
            with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
                fd.write("PACKAGES: %s\n" % ' '.join(
                    map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))

    def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
        # Emit minimal runtime pkgdata mapping each variant package back
        # to its base package name
        for variant in variants:
            for pkg in packages.split():
                ml_pkg = "%s-%s" % (variant, pkg)
                subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
                with open(subdata_file, 'w') as fd:
                    fd.write("PKG:%s: %s" % (ml_pkg, pkg))

    packages = d.getVar('PACKAGES')
    pkgdest = d.getVar('PKGDEST')
    pkgdatadir = d.getVar('PKGDESTWORK')

    # Recipe-level pkgdata: the list of packages this recipe produces
    data_file = pkgdatadir + d.expand("/${PN}")
    with open(data_file, 'w') as fd:
        fd.write("PACKAGES: %s\n" % packages)

    # NOTE(review): the fallback here is a list, but the lookup below
    # indexes and deletes by key, which suggests a dict is expected when
    # PKGDEBUGSOURCES is set -- confirm against the debug-split code
    pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []

    pn = d.getVar('PN')
    global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
    variants = (d.getVar('MULTILIB_VARIANTS') or "").split()

    if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
        write_extra_pkgs(variants, pn, packages, pkgdatadir)

    if bb.data.inherits_class('allarch', d) and not variants \
        and not bb.data.inherits_class('packagegroup', d):
        write_extra_pkgs(global_variants, pn, packages, pkgdatadir)

    workdir = d.getVar('WORKDIR')

    for pkg in packages.split():
        pkgval = d.getVar('PKG:%s' % pkg)
        if pkgval is None:
            pkgval = pkg
            d.setVar('PKG:%s' % pkg, pkg)

        extended_data = {
            "files_info": {}
        }

        pkgdestpkg = os.path.join(pkgdest, pkg)
        files = {}
        files_extra = {}
        total_size = 0
        seen = set()
        for f in pkgfiles[pkg]:
            # Path of the file relative to the package root
            fpath = os.sep + os.path.relpath(f, pkgdestpkg)

            fstat = os.lstat(f)
            files[fpath] = fstat.st_size

            extended_data["files_info"].setdefault(fpath, {})
            extended_data["files_info"][fpath]['size'] = fstat.st_size

            # Count hardlinked files only once towards the package size
            if fstat.st_ino not in seen:
                seen.add(fstat.st_ino)
                total_size += fstat.st_size

            if fpath in pkgdebugsource:
                extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
                del pkgdebugsource[fpath]

        d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))

        process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
        add_set_e_to_scriptlets(pkg)

        # Per-package runtime pkgdata file
        subdata_file = pkgdatadir + "/runtime/%s" % pkg
        with open(subdata_file, 'w') as sf:
            for var in (d.getVar('PKGDATA_VARS') or "").split():
                val = write_if_exists(sf, pkg, var)

            write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
            for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
                write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)

            write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
            for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
                write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)

            sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))

        # Extended file info, compressed as JSON+zstd
        subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
        num_threads = int(d.getVar("BB_NUMBER_THREADS"))
        with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
            json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))

        # Symlinks needed for rprovides lookup
        rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
        if rprov:
            for p in bb.utils.explode_deps(rprov):
                subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
                bb.utils.mkdirhier(os.path.dirname(subdata_sym))
                oe.path.relsymlink(subdata_file, subdata_sym, True)

        allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
        if not allow_empty:
            allow_empty = d.getVar('ALLOW_EMPTY')
        root = "%s/%s" % (pkgdest, pkg)
        os.chdir(root)
        g = glob('*')
        # Only mark the package as packaged if it has content or is
        # explicitly allowed to be empty
        if g or allow_empty == "1":
            # Symlinks needed for reverse lookups (from the final package name)
            subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
            oe.path.relsymlink(subdata_file, subdata_sym, True)

            packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
            open(packagedfile, 'w').close()

    if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
        write_extra_runtime_pkgs(variants, packages, pkgdatadir)

    if bb.data.inherits_class('allarch', d) and not variants \
        and not bb.data.inherits_class('packagegroup', d):
        write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
| 357 | |||
def mapping_rename_hook(d):
    """
    Rewrite variables to account for package renaming in things
    like debian.bbclass or manual PKG variable name changes
    """
    pkg = d.getVar("PKG")
    for varname in ("RDEPENDS", "RRECOMMENDS", "RSUGGESTS"):
        oe.packagedata.runtime_mapping_rename(varname, pkg, d)
diff --git a/meta-xilinx-core/lib/oe/packagegroup.py b/meta-xilinx-core/lib/oe/packagegroup.py new file mode 100644 index 00000000..7b759475 --- /dev/null +++ b/meta-xilinx-core/lib/oe/packagegroup.py | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import itertools | ||
| 8 | |||
def is_optional(feature, d):
    """Return True if the FEATURE_PACKAGES list for *feature* is flagged optional."""
    flag = d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional")
    return bool(flag)
| 11 | |||
def packages(features, d):
    """Yield every package listed in FEATURE_PACKAGES_<feature> for the given features."""
    for feature in features:
        feature_packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
        yield from (feature_packages or "").split()
| 17 | |||
def required_packages(features, d):
    """Return the packages of all non-optional features."""
    mandatory = [feature for feature in features if not is_optional(feature, d)]
    return packages(mandatory, d)
| 21 | |||
def optional_packages(features, d):
    """Return the packages of all optional features."""
    optional = [feature for feature in features if is_optional(feature, d)]
    return packages(optional, d)
| 25 | |||
def active_packages(features, d):
    """Return the required packages followed by the optional ones."""
    required = required_packages(features, d)
    optional = optional_packages(features, d)
    return itertools.chain(required, optional)
| 29 | |||
def active_recipes(features, d):
    """Yield the recipe providing each active package, skipping unknown packages."""
    import oe.packagedata

    for pkg in active_packages(features, d):
        pn = oe.packagedata.recipename(pkg, d)
        if pn:
            yield pn
diff --git a/meta-xilinx-core/lib/oe/patch.py b/meta-xilinx-core/lib/oe/patch.py new file mode 100644 index 00000000..60a0cc82 --- /dev/null +++ b/meta-xilinx-core/lib/oe/patch.py | |||
| @@ -0,0 +1,1001 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import os | ||
| 8 | import shlex | ||
| 9 | import subprocess | ||
| 10 | import oe.path | ||
| 11 | import oe.types | ||
| 12 | |||
class NotFoundError(bb.BBHandledException):
    """Raised when a path passed to a patch operation does not exist."""
    def __init__(self, path):
        # path: the missing file or directory
        self.path = path

    def __str__(self):
        return "Error: %s not found." % self.path
| 19 | |||
class CmdError(bb.BBHandledException):
    """Raised when a command run via runcmd() exits with a non-zero status."""
    def __init__(self, command, exitstatus, output):
        self.command = command      # the command line that was executed
        self.status = exitstatus    # exit status as reported by runcmd()
        self.output = output        # combined stdout/stderr text

    def __str__(self):
        return "Command Error: '%s' exited with %d  Output:\n%s" % \
                (self.command, self.status, self.output)
| 29 | |||
| 30 | |||
def runcmd(args, dir = None):
    """Shell-quote *args*, run them through the shell and return decoded stdout.

    If *dir* is given, change into it first (the previous cwd is always
    restored).  Raises NotFoundError if *dir* does not exist and CmdError on a
    non-zero exit status.  Patch fuzz seen in the output is logged so
    insane.bbclass can turn it into a warning/error.
    """
    if dir:
        olddir = os.path.abspath(os.curdir)
        if not os.path.exists(dir):
            raise NotFoundError(dir)
        os.chdir(dir)
        # print("cwd: %s -> %s" % (olddir, dir))

    try:
        # Quote each argument so the joined string is safe to hand to the
        # shell (shell=True below) without word-splitting surprises.
        args = [ shlex.quote(str(arg)) for arg in args ]
        cmd = " ".join(args)
        # print("cmd: %s" % cmd)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = proc.communicate()
        stdout = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        exitstatus = proc.returncode
        if exitstatus != 0:
            # NOTE(review): Popen.returncode is already the decoded exit code,
            # so ">> 8" (an os.wait()-style convention) yields 0 for most
            # failures -- presumably a holdover; confirm before relying on
            # CmdError.status.
            raise CmdError(cmd, exitstatus >> 8, "stdout: %s\nstderr: %s" % (stdout, stderr))
        if " fuzz " in stdout and "Hunk " in stdout:
            # Drop patch fuzz info with header and footer to log file so
            # insane.bbclass can handle to throw error/warning
            bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % format(stdout))

        return stdout

    finally:
        if dir:
            os.chdir(olddir)
| 60 | |||
| 61 | |||
class PatchError(Exception):
    """Raised for errors while importing, interpreting or applying patches."""

    def __init__(self, msg):
        # Fix: chain to Exception.__init__ so args/repr/pickling behave
        # normally; the original skipped it, leaving e.args empty.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return "Patch Error: %s" % self.msg
| 68 | |||
class PatchSet(object):
    """Abstract base class for a set of patches applied to a source tree."""

    # Default values filled into a patch dict by Import() when absent.
    defaults = {
        "strippath": 1
    }

    def __init__(self, dir, d):
        self.dir = dir          # source tree the patches apply to
        self.d = d              # bitbake datastore
        self.patches = []       # patch dicts, in application order
        self._current = None    # index of the most recently applied patch, or None

    def current(self):
        """Return the index of the currently applied patch (None if none)."""
        return self._current

    def Clean(self):
        """
        Clean out the patch set.  Generally includes unapplying all
        patches and wiping out all associated metadata.
        """
        raise NotImplementedError()

    def Import(self, patch, force):
        """Record *patch* (a dict) in the set: fetch remote patches locally,
        fill in defaulted parameters and compute the file md5.  Subclasses
        extend this; it does not apply the patch."""
        if not patch.get("file"):
            if not patch.get("remote"):
                raise PatchError("Patch file must be specified in patch import.")
            else:
                patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

        # Fill in any missing parameters from the class defaults (strippath).
        for param in PatchSet.defaults:
            if not patch.get(param):
                patch[param] = PatchSet.defaults[param]

        # NOTE(review): for remote patches "file" is computed twice (above and
        # here, with variable expansion) -- presumably this second assignment
        # is the authoritative one.
        if patch.get("remote"):
            patch["file"] = self.d.expand(bb.fetch2.localpath(patch["remote"], self.d))

        patch["filemd5"] = bb.utils.md5_file(patch["file"])

    def Push(self, force):
        raise NotImplementedError()

    def Pop(self, force):
        raise NotImplementedError()

    def Refresh(self, remote = None, all = None):
        raise NotImplementedError()

    @staticmethod
    def getPatchedFiles(patchfile, striplevel, srcdir=None):
        """
        Read a patch file and determine which files it will modify.
        Params:
            patchfile: the patch file to read
            striplevel: the strip level at which the patch is going to be applied
            srcdir: optional path to join onto the patched file paths
        Returns:
            A list of tuples of file path and change mode ('A' for add,
            'D' for delete or 'M' for modify)
        """

        def patchedpath(patchline):
            # Extract the file path from a '---'/'+++'/'***' header line and
            # strip the requested number of leading components.
            filepth = patchline.split()[1]
            if filepth.endswith('/dev/null'):
                return '/dev/null'
            filesplit = filepth.split(os.sep)
            if striplevel > len(filesplit):
                bb.error('Patch %s has invalid strip level %d' % (patchfile, striplevel))
                return None
            return os.sep.join(filesplit[striplevel:])

        # Try utf-8 first, then fall back to latin-1 for non-UTF-8 patches.
        for encoding in ['utf-8', 'latin-1']:
            try:
                copiedmode = False
                filelist = []
                with open(patchfile) as f:
                    for line in f:
                        if line.startswith('--- '):
                            patchpth = patchedpath(line)
                            if not patchpth:
                                break
                            if copiedmode:
                                addedfile = patchpth
                            else:
                                removedfile = patchpth
                        elif line.startswith('+++ '):
                            addedfile = patchedpath(line)
                            if not addedfile:
                                break
                        elif line.startswith('*** '):
                            copiedmode = True
                            removedfile = patchedpath(line)
                            if not removedfile:
                                break
                        else:
                            removedfile = None
                            addedfile = None

                        # A ---/+++ (or ***/---) pair identifies one file;
                        # /dev/null on either side marks an add or a delete.
                        if addedfile and removedfile:
                            if removedfile == '/dev/null':
                                mode = 'A'
                            elif addedfile == '/dev/null':
                                mode = 'D'
                            else:
                                mode = 'M'
                            if srcdir:
                                fullpath = os.path.abspath(os.path.join(srcdir, addedfile))
                            else:
                                fullpath = addedfile
                            filelist.append((fullpath, mode))
            except UnicodeDecodeError:
                continue
            break
        else:
            raise PatchError('Unable to decode %s' % patchfile)

        return filelist
| 184 | |||
| 185 | |||
class PatchTree(PatchSet):
    """PatchSet that applies patches with the plain ``patch`` tool, recording
    the applied stack in a quilt-style patches/series file."""

    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.patchdir = os.path.join(self.dir, 'patches')
        self.seriespath = os.path.join(self.dir, 'patches', 'series')
        bb.utils.mkdirhier(self.patchdir)

    def _appendPatchFile(self, patch, strippath):
        # Record "<basename>,<strippath>" in the series file and copy the
        # patch into the patches directory.
        with open(self.seriespath, 'a') as f:
            f.write(os.path.basename(patch) + "," + strippath + "\n")
        shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
        runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

    def _removePatch(self, p):
        # Reverse-apply the patch named by a "<file>,<strippath>" series entry.
        patch = {}
        patch['file'] = p.split(",")[0]
        patch['strippath'] = p.split(",")[1]
        self._applypatch(patch, False, True)

    def _removePatchFile(self, all = False):
        # Unapply the last (or all) applied patches and rewrite the series
        # file to match.
        if not os.path.exists(self.seriespath):
            return
        with open(self.seriespath, 'r+') as f:
            patches = f.readlines()
        if all:
            for p in reversed(patches):
                self._removePatch(os.path.join(self.patchdir, p.strip()))
            patches = []
        else:
            self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
            patches.pop()
        with open(self.seriespath, 'w') as f:
            for p in patches:
                f.write(p)

    def Import(self, patch, force = None):
        """Record *patch* in the set, inserted after the current position."""
        PatchSet.Import(self, patch, force)

        if self._current is not None:
            i = self._current + 1
        else:
            i = 0
        self.patches.insert(i, patch)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        # Apply (or with reverse=True, unapply) one patch with patch(1).
        # When force is False a --dry-run is attempted first so a failing
        # patch leaves the tree untouched.
        shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
        if reverse:
            shellcmd.append('-R')

        if not run:
            # NOTE(review): this concatenates without separators
            # ("sh-ccat ..."), unlike the runcmd invocation below -- presumably
            # only used for display; confirm before relying on it.
            return "sh" + "-c" + " ".join(shellcmd)

        if not force:
            shellcmd.append('--dry-run')

        try:
            output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

            if force:
                return

            # Dry run succeeded: drop '--dry-run' and apply for real.
            shellcmd.pop(len(shellcmd) - 1)
            output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
        except CmdError as err:
            raise bb.BBHandledException("Applying '%s' failed:\n%s" %
                                        (os.path.basename(patch['file']), err.output))

        if not reverse:
            self._appendPatchFile(patch['file'], patch['strippath'])

        return output

    def Push(self, force = False, all = False, run = True):
        """Apply the next patch (or all patches, with all=True)."""
        bb.note("self._current is %s" % self._current)
        bb.note("patches is %s" % self.patches)
        if all:
            for i in self.patches:
                bb.note("applying patch %s" % i)
                self._applypatch(i, force)
                # NOTE(review): this sets _current to the patch dict, while
                # the branch below treats _current as an integer index --
                # looks inconsistent; confirm before mixing all=True with
                # subsequent single-patch Push/Pop calls.
                self._current = i
        else:
            if self._current is not None:
                next = self._current + 1
            else:
                next = 0

            bb.note("applying patch %s" % self.patches[next])
            ret = self._applypatch(self.patches[next], force)

            self._current = next
            return ret

    def Pop(self, force = None, all = None):
        """Unapply the topmost patch (or all patches, with all=True)."""
        if all:
            self._removePatchFile(True)
            self._current = None
        else:
            self._removePatchFile(False)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Clean(self):
        """Unapply everything."""
        self.Pop(all=True)
| 295 | |||
class GitApplyTree(PatchTree):
    """PatchTree that applies patches as git commits (git am, falling back to
    git apply and finally patch(1)), recording the originating patch file in
    git notes so the patches can later be regenerated."""

    # Notes ref used to annotate commits created by this class.
    notes_ref = "refs/notes/devtool"
    # Note key recording which patch file produced a commit.
    original_patch = 'original patch'
    # Note key marking commits that should be skipped by extractPatches().
    ignore_commit = 'ignore'

    def __init__(self, dir, d):
        PatchTree.__init__(self, dir, d)
        self.commituser = d.getVar('PATCH_GIT_USER_NAME')
        self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
        if not self._isInitialized(d):
            self._initRepo()

    def _isInitialized(self, d):
        # True only if self.dir already belongs to a usable git repository.
        cmd = "git rev-parse --show-toplevel"
        try:
            output = runcmd(cmd.split(), self.dir).strip()
        except CmdError as err:
            ## runcmd returned non-zero which most likely means 128
            ## Not a git directory
            return False
        ## Make sure repo is in builddir to not break top-level git repos, or under workdir
        return os.path.samefile(output, self.dir) or oe.path.is_path_parent(d.getVar('WORKDIR'), output)

    def _initRepo(self):
        # Create a fresh repository and commit the pristine source as a base.
        runcmd("git init".split(), self.dir)
        runcmd("git add .".split(), self.dir)
        runcmd("git commit -a --allow-empty -m bitbake_patching_started".split(), self.dir)

    @staticmethod
    def extractPatchHeader(patchfile):
        """
        Extract just the header lines from the top of a patch file
        """
        # Try utf-8 first, then latin-1 for non-UTF-8 patches.
        for encoding in ['utf-8', 'latin-1']:
            lines = []
            try:
                with open(patchfile, 'r', encoding=encoding) as f:
                    for line in f:
                        # The header ends where the first diff begins.
                        if line.startswith('Index: ') or line.startswith('diff -') or line.startswith('---'):
                            break
                        lines.append(line)
            except UnicodeDecodeError:
                continue
            break
        else:
            raise PatchError('Unable to find a character encoding to decode %s' % patchfile)
        return lines

    @staticmethod
    def decodeAuthor(line):
        """Decode a possibly RFC2047-encoded author value from a header line."""
        from email.header import decode_header
        authorval = line.split(':', 1)[1].strip().replace('"', '')
        result = decode_header(authorval)[0][0]
        if hasattr(result, 'decode'):
            result = result.decode('utf-8')
        return result

    @staticmethod
    def interpretPatchHeader(headerlines):
        """Parse patch header lines into (outlines, author, date, subject),
        where outlines is the remaining commit-message body."""
        import re
        author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
        from_commit_re = re.compile(r'^From [a-z0-9]{40} .*')
        outlines = []
        author = None
        date = None
        subject = None
        for line in headerlines:
            if line.startswith('Subject: '):
                subject = line.split(':', 1)[1]
                # Remove any [PATCH][oe-core] etc.
                subject = re.sub(r'\[.+?\]\s*', '', subject)
                continue
            elif line.startswith('From: ') or line.startswith('Author: '):
                authorval = GitApplyTree.decodeAuthor(line)
                # git is fussy about author formatting i.e. it must be Name <email@domain>
                if author_re.match(authorval):
                    author = authorval
                continue
            elif line.startswith('Date: '):
                if date is None:
                    dateval = line.split(':', 1)[1].strip()
                    # Very crude check for date format, since git will blow up if it's not in the right
                    # format. Without e.g. a python-dateutils dependency we can't do a whole lot more
                    if len(dateval) > 12:
                        date = dateval
                continue
            elif not author and line.lower().startswith('signed-off-by: '):
                authorval = GitApplyTree.decodeAuthor(line)
                # git is fussy about author formatting i.e. it must be Name <email@domain>
                if author_re.match(authorval):
                    author = authorval
            elif from_commit_re.match(line):
                # We don't want the From <commit> line - if it's present it will break rebasing
                continue
            outlines.append(line)

        if not subject:
            # No Subject: header; fall back to the first non-blank line if it
            # stands alone and looks like a usable one-line summary.
            firstline = None
            for line in headerlines:
                line = line.strip()
                if firstline:
                    if line:
                        # Second line is not blank, the first line probably isn't usable
                        firstline = None
                    break
                elif line:
                    firstline = line
            if firstline and not firstline.startswith(('#', 'Index:', 'Upstream-Status:')) and len(firstline) < 100:
                subject = firstline

        return outlines, author, date, subject

    @staticmethod
    def gitCommandUserOptions(cmd, commituser=None, commitemail=None, d=None):
        """Append -c user.name/user.email options to *cmd* (mutated in place);
        a datastore *d* overrides the explicit arguments."""
        if d:
            commituser = d.getVar('PATCH_GIT_USER_NAME')
            commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
        if commituser:
            cmd += ['-c', 'user.name="%s"' % commituser]
        if commitemail:
            cmd += ['-c', 'user.email="%s"' % commitemail]

    @staticmethod
    def prepareCommit(patchfile, commituser=None, commitemail=None):
        """
        Prepare a git commit command line based on the header from a patch file
        (typically this is useful for patches that cannot be applied with "git am" due to formatting)

        Returns (tmpfile, cmd) where tmpfile holds the commit message; the
        caller is responsible for removing tmpfile.
        """
        import tempfile
        # Process patch header and extract useful information
        lines = GitApplyTree.extractPatchHeader(patchfile)
        outlines, author, date, subject = GitApplyTree.interpretPatchHeader(lines)
        if not author or not subject or not date:
            # Fall back to the git history of the patch file itself for any
            # metadata the header didn't provide.
            try:
                shellcmd = ["git", "log", "--format=email", "--follow", "--diff-filter=A", "--", patchfile]
                out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.dirname(patchfile))
            except CmdError:
                out = None
            if out:
                _, newauthor, newdate, newsubject = GitApplyTree.interpretPatchHeader(out.splitlines())
                if not author:
                    # If we're setting the author then the date should be set as well
                    author = newauthor
                    date = newdate
                elif not date:
                    # If we don't do this we'll get the current date, at least this will be closer
                    date = newdate
                if not subject:
                    subject = newsubject
        if subject and not (outlines and outlines[0].strip() == subject):
            outlines.insert(0, '%s\n\n' % subject.strip())

        # Write out commit message to a file
        with tempfile.NamedTemporaryFile('w', delete=False) as tf:
            tmpfile = tf.name
            for line in outlines:
                tf.write(line)
        # Prepare git command
        cmd = ["git"]
        GitApplyTree.gitCommandUserOptions(cmd, commituser, commitemail)
        cmd += ["commit", "-F", tmpfile, "--no-verify"]
        # git doesn't like plain email addresses as authors
        if author and '<' in author:
            cmd.append('--author="%s"' % author)
        if date:
            cmd.append('--date="%s"' % date)
        return (tmpfile, cmd)

    @staticmethod
    def addNote(repo, ref, key, value=None):
        """Append a "key" or "key: value" note to *ref* under notes_ref."""
        note = key + (": %s" % value if value else "")
        notes_ref = GitApplyTree.notes_ref
        runcmd(["git", "config", "notes.rewriteMode", "ignore"], repo)
        runcmd(["git", "config", "notes.displayRef", notes_ref, notes_ref], repo)
        runcmd(["git", "config", "notes.rewriteRef", notes_ref, notes_ref], repo)
        runcmd(["git", "notes", "--ref", notes_ref, "append", "-m", note, ref], repo)

    @staticmethod
    def removeNote(repo, ref, key):
        """Remove notes matching *key* from *ref*, re-adding the others."""
        notes = GitApplyTree.getNotes(repo, ref)
        notes = {k: v for k, v in notes.items() if k != key and not k.startswith(key + ":")}
        runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "remove", "--ignore-missing", ref], repo)
        for note, value in notes.items():
            GitApplyTree.addNote(repo, ref, note, value)

    @staticmethod
    def getNotes(repo, ref):
        """Return the notes on *ref* as a {key: value} dict; falls back to
        '%% '-prefixed lines in the commit message for compatibility."""
        import re

        note = None
        try:
            note = runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "show", ref], repo)
            prefix = ""
        except CmdError:
            note = runcmd(['git', 'show', '-s', '--format=%B', ref], repo)
            prefix = "%% "

        note_re = re.compile(r'^%s(.*?)(?::\s*(.*))?$' % prefix)
        notes = dict()
        for line in note.splitlines():
            m = note_re.match(line)
            if m:
                notes[m.group(1)] = m.group(2)

        return notes

    @staticmethod
    def commitIgnored(subject, dir=None, files=None, d=None):
        """Commit *files* (or the index) with *subject* and mark the commit
        so extractPatches() will skip it."""
        if files:
            runcmd(['git', 'add'] + files, dir)
        cmd = ["git"]
        GitApplyTree.gitCommandUserOptions(cmd, d=d)
        cmd += ["commit", "-m", subject, "--no-verify"]
        runcmd(cmd, dir)
        GitApplyTree.addNote(dir, "HEAD", GitApplyTree.ignore_commit)

    @staticmethod
    def extractPatches(tree, startcommits, outdir, paths=None):
        """Export commits after each {name: rev} in *startcommits* as patch
        files under outdir/<name>, honouring the ignore/original-patch notes."""
        import tempfile
        import shutil
        tempdir = tempfile.mkdtemp(prefix='oepatch')
        try:
            for name, rev in startcommits.items():
                shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", rev, "-o", tempdir]
                if paths:
                    shellcmd.append('--')
                    shellcmd.extend(paths)
                out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.join(tree, name))
                if out:
                    for srcfile in out.split():
                        # This loop, which is used to remove any line that
                        # starts with "%% original patch", is kept for backwards
                        # compatibility. If/when that compatibility is dropped,
                        # it can be replaced with code to just read the first
                        # line of the patch file to get the SHA-1, and the code
                        # below that writes the modified patch file can be
                        # replaced with a simple file move.
                        for encoding in ['utf-8', 'latin-1']:
                            patchlines = []
                            try:
                                with open(srcfile, 'r', encoding=encoding, newline='') as f:
                                    for line in f:
                                        if line.startswith("%% " + GitApplyTree.original_patch):
                                            continue
                                        patchlines.append(line)
                            except UnicodeDecodeError:
                                continue
                            break
                        else:
                            raise PatchError('Unable to find a character encoding to decode %s' % srcfile)

                        # First line of a format-patch file is "From <sha1> ...".
                        sha1 = patchlines[0].split()[1]
                        notes = GitApplyTree.getNotes(os.path.join(tree, name), sha1)
                        if GitApplyTree.ignore_commit in notes:
                            continue
                        outfile = notes.get(GitApplyTree.original_patch, os.path.basename(srcfile))

                        bb.utils.mkdirhier(os.path.join(outdir, name))
                        with open(os.path.join(outdir, name, outfile), 'w') as of:
                            for line in patchlines:
                                of.write(line)
        finally:
            shutil.rmtree(tempdir)

    def _need_dirty_check(self):
        # Whether the fetched sources can leave the git tree dirty, in which
        # case git am would always fail and we must fall back to patch(1).
        fetch = bb.fetch2.Fetch([], self.d)
        check_dirtyness = False
        for url in fetch.urls:
            url_data = fetch.ud[url]
            parm = url_data.parm
            # a git url with subpath param will surely be dirty
            # since the git tree from which we clone will be emptied
            # from all files that are not in the subpath
            if url_data.type == 'git' and parm.get('subpath'):
                check_dirtyness = True
        return check_dirtyness

    def _commitpatch(self, patch, patchfilevar):
        # Stage everything except the patches directory and commit it using a
        # message derived from the patch header.
        output = ""
        # Add all files
        shellcmd = ["git", "add", "-f", "-A", "."]
        output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
        # Exclude the patches directory
        shellcmd = ["git", "reset", "HEAD", self.patchdir]
        output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
        # Commit the result
        (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
        try:
            shellcmd.insert(0, patchfilevar)
            output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
        finally:
            os.remove(tmpfile)
        return output

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        """Apply one patch as a git commit: try git am, then git apply, then
        plain patch(1) plus an explicit commit.  On success the commit is
        annotated with the originating patch file name."""
        import shutil

        def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
            # Finish building and execute one apply command.
            if reverse:
                shellcmd.append('-R')

            shellcmd.append(patch['file'])

            if not run:
                return "sh" + "-c" + " ".join(shellcmd)

            return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
        if not reporoot:
            raise Exception("Cannot get repository root for directory %s" % self.dir)

        patch_applied = True
        try:
            patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
            if self._need_dirty_check():
                # Check dirtyness of the tree
                try:
                    output = runcmd(["git", "--work-tree=%s" % reporoot, "status", "--short"])
                except CmdError:
                    pass
                else:
                    if output:
                        # The tree is dirty, no need to try to apply patches with git anymore
                        # since they fail, fallback directly to patch
                        output = PatchTree._applypatch(self, patch, force, reverse, run)
                        output += self._commitpatch(patch, patchfilevar)
                        return output
            try:
                shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
                self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
                shellcmd += ["am", "-3", "--keep-cr", "--no-scissors", "-p%s" % patch['strippath']]
                return _applypatchhelper(shellcmd, patch, force, reverse, run)
            except CmdError:
                # Need to abort the git am, or we'll still be within it at the end
                try:
                    shellcmd = ["git", "--work-tree=%s" % reporoot, "am", "--abort"]
                    runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
                except CmdError:
                    pass
                # git am won't always clean up after itself, sadly, so...
                shellcmd = ["git", "--work-tree=%s" % reporoot, "reset", "--hard", "HEAD"]
                runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
                # Also need to take care of any stray untracked files
                shellcmd = ["git", "--work-tree=%s" % reporoot, "clean", "-f"]
                runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

                # Fall back to git apply
                shellcmd = ["git", "--git-dir=%s" % reporoot, "apply", "-p%s" % patch['strippath']]
                try:
                    output = _applypatchhelper(shellcmd, patch, force, reverse, run)
                except CmdError:
                    # Fall back to patch
                    output = PatchTree._applypatch(self, patch, force, reverse, run)
                output += self._commitpatch(patch, patchfilevar)
                return output
        except:
            patch_applied = False
            raise
        finally:
            if patch_applied:
                GitApplyTree.addNote(self.dir, "HEAD", GitApplyTree.original_patch, os.path.basename(patch['file']))
| 658 | |||
| 659 | |||
class QuiltTree(PatchSet):
    """PatchSet implementation that manages patches with the quilt tool."""

    def _runcmd(self, args, run = True):
        # Run a quilt command with the configured quiltrc; with run=False,
        # just return the command list instead of executing it.
        quiltrc = self.d.getVar('QUILTRCFILE')
        if not run:
            return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
        runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)

    def _quiltpatchpath(self, file):
        # Location of the named patch inside the quilt patches directory.
        return os.path.join(self.dir, "patches", os.path.basename(file))


    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.initialized = False
        p = os.path.join(self.dir, 'patches')
        if not os.path.exists(p):
            os.makedirs(p)

    def Clean(self):
        """Unapply all patches (best-effort) and remove the series file."""
        try:
            # make sure that patches/series file exists before quilt pop to keep quilt-0.67 happy
            open(os.path.join(self.dir, "patches","series"), 'a').close()
            self._runcmd(["pop", "-a", "-f"])
            oe.path.remove(os.path.join(self.dir, "patches","series"))
        except Exception:
            pass
        self.initialized = True

    def InitFromDir(self):
        """Populate self.patches and self._current from an existing quilt
        patches/series layout on disk."""
        # read series -> self.patches
        seriespath = os.path.join(self.dir, 'patches', 'series')
        if not os.path.exists(self.dir):
            raise NotFoundError(self.dir)
        if os.path.exists(seriespath):
            with open(seriespath, 'r') as f:
                for line in f.readlines():
                    patch = {}
                    parts = line.strip().split()
                    patch["quiltfile"] = self._quiltpatchpath(parts[0])
                    patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
                    if len(parts) > 1:
                        # series entries look like "name.patch -p1"; strip "-p"
                        patch["strippath"] = parts[1][2:]
                    self.patches.append(patch)

            # determine which patches are applied -> self._current
            try:
                output = runcmd(["quilt", "applied"], self.dir)
            except CmdError as err:
                # Fix: the original referenced sys.exc_value, which was removed
                # in Python 3 and raised AttributeError here; inspect the
                # caught exception instead.
                if err.output.strip() == "No patches applied":
                    return
                else:
                    raise
            output = [val for val in output.split('\n') if not val.startswith('#')]
            for patch in self.patches:
                if os.path.basename(patch["quiltfile"]) == output[-1]:
                    self._current = self.patches.index(patch)
        self.initialized = True

    def Import(self, patch, force = None):
        """Record *patch*: symlink it into patches/, append it to the series
        file and insert it at the current position."""
        if not self.initialized:
            self.InitFromDir()
        PatchSet.Import(self, patch, force)
        oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
        with open(os.path.join(self.dir, "patches", "series"), "a") as f:
            f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"] + "\n")
        patch["quiltfile"] = self._quiltpatchpath(patch["file"])
        patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])

        # TODO: determine if the file being imported:
        #      1) is already imported, and is the same
        #      2) is already imported, but differs

        self.patches.insert(self._current or 0, patch)


    def Push(self, force = False, all = False, run = True):
        """Apply the next patch (quilt push), or all with all=True; with
        run=False return the command line instead of executing it."""
        # quilt push [-f]

        args = ["push"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")
        if not run:
            return self._runcmd(args, run)

        self._runcmd(args)

        if self._current is not None:
            self._current = self._current + 1
        else:
            self._current = 0

    def Pop(self, force = None, all = None):
        """Unapply the topmost patch (quilt pop), or all with all=True."""
        # quilt pop [-f]
        args = ["pop"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")

        self._runcmd(args)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Refresh(self, **kwargs):
        """Refresh a patch: with remote=True copy the quilt-managed file back
        over the fetched local patch file, otherwise run quilt refresh."""
        if kwargs.get("remote"):
            patch = self.patches[kwargs["patch"]]
            if not patch:
                raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
            (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
            if type == "file":
                import shutil
                if not patch.get("file") and patch.get("remote"):
                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

                shutil.copyfile(patch["quiltfile"], patch["file"])
            else:
                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
        else:
            # quilt refresh
            args = ["refresh"]
            if kwargs.get("quiltfile"):
                args.append(os.path.basename(kwargs["quiltfile"]))
            elif kwargs.get("patch"):
                args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
            self._runcmd(args)
| 792 | |||
class Resolver(object):
    """Abstract base for strategies that resolve patch application failures."""

    def __init__(self, patchset, terminal):
        raise NotImplementedError

    def Resolve(self):
        """Attempt to apply the patchset, resolving failures as needed."""
        raise NotImplementedError

    def Revert(self):
        """Undo any resolution work."""
        raise NotImplementedError

    def Finalize(self):
        """Write the resolution results back to the patch files."""
        raise NotImplementedError
| 805 | |||
class NOOPResolver(Resolver):
    """Resolver that simply attempts the push and propagates any failure."""

    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    def Resolve(self):
        """Apply the patchset from inside its directory; no recovery on error."""
        startdir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push()
        except Exception:
            import sys
            raise
        finally:
            # Always restore the caller's working directory
            os.chdir(startdir)
| 821 | |||
| 822 | # Patch resolver which relies on the user doing all the work involved in the | ||
| 823 | # resolution, with the exception of refreshing the remote copy of the patch | ||
| 824 | # files (the urls). | ||
class UserResolver(Resolver):
    """Patch resolver which relies on the user doing all the work involved
    in the resolution, with the exception of refreshing the remote copy of
    the patch files (the urls)."""

    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    # Force a push in the patchset, then drop to a shell for the user to
    # resolve any rejected hunks
    def Resolve(self):
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push(False)
        except CmdError as v:
            # Patch application failed
            # Re-run the failing push forced and without running it, to get
            # the command line to hand to the user's shell
            patchcmd = self.patchset.Push(True, False, False)

            t = self.patchset.d.getVar('T')
            if not t:
                bb.msg.fatal("Build", "T not set")
            bb.utils.mkdirhier(t)
            import random
            # Unique rcfile per process so concurrent tasks don't clash
            rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
            with open(rcfile, "w") as f:
                f.write("echo '*** Manual patch resolution mode ***'\n")
                f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
                f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
                f.write("echo ''\n")
                f.write(" ".join(patchcmd) + "\n")
            os.chmod(rcfile, 0o775)

            self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)

            # Construct a new PatchSet after the user's changes, compare the
            # sets, checking patches for modifications, and doing a remote
            # refresh on each.
            oldpatchset = self.patchset
            self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)

            for patch in self.patchset.patches:
                oldpatch = None
                for opatch in oldpatchset.patches:
                    if opatch["quiltfile"] == patch["quiltfile"]:
                        oldpatch = opatch

                if oldpatch:
                    patch["remote"] = oldpatch["remote"]
                    if patch["quiltfile"] == oldpatch["quiltfile"]:
                        if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
                            bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
                            # user change? remote refresh
                            self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
                        else:
                            # User did not fix the problem. Abort.
                            raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
        except Exception:
            raise
        finally:
            os.chdir(olddir)
| 883 | |||
| 884 | |||
def patch_path(url, fetch, workdir, expand=True):
    """Return the local path of a patch, or None if this isn't a patch."""
    local = fetch.localpath(url)
    if os.path.isdir(local):
        # A directory can never be a patch file
        return None

    base, ext = os.path.splitext(os.path.basename(local))
    if ext in ('.gz', '.bz2', '.xz', '.Z'):
        # Compressed patch; the uncompressed copy lives in the workdir
        if expand:
            local = os.path.join(workdir, base)
        ext = os.path.splitext(base)[1]

    parm = fetch.ud[url].parm
    if "apply" in parm:
        # An explicit apply=... url parameter overrides the extension check
        if not oe.types.boolean(parm["apply"]):
            return None
    elif ext not in (".diff", ".patch"):
        return None

    return local
| 906 | |||
def src_patches(d, all=False, expand=True):
    """Collect the patches (or, with all=True, the non-patch sources) from
    SRC_URI.

    Returns a list of encoded file:// urls carrying patchname/striplevel/
    patchdir parameters, or the list of non-patch local source paths when
    'all' is set.
    """
    workdir = d.getVar('WORKDIR')
    fetch = bb.fetch2.Fetch([], d)
    patches = []
    sources = []
    for url in fetch.urls:
        local = patch_path(url, fetch, workdir, expand)
        if not local:
            # Not a patch; optionally record it as a plain source
            if all:
                local = fetch.localpath(url)
                sources.append(local)
            continue

        urldata = fetch.ud[url]
        parm = urldata.parm
        patchname = parm.get('pname') or os.path.basename(local)

        apply, reason = should_apply(parm, d)
        if not apply:
            if reason:
                bb.note("Patch %s %s" % (patchname, reason))
            continue

        patchparm = {'patchname': patchname}
        # striplevel supersedes the deprecated pnum parameter
        if "striplevel" in parm:
            striplevel = parm["striplevel"]
        elif "pnum" in parm:
            #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
            striplevel = parm["pnum"]
        else:
            striplevel = '1'
        patchparm['striplevel'] = striplevel

        patchdir = parm.get('patchdir')
        if patchdir:
            patchparm['patchdir'] = patchdir

        localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
        patches.append(localurl)

    if all:
        return sources

    return patches
| 951 | |||
| 952 | |||
def should_apply(parm, d):
    """Decide whether a patch with the given url parameters applies to this
    recipe, based on its SRCDATE, SRCREV and PV restrictions.

    Returns (True, None) when the patch should be applied, otherwise
    (False, reason-string).
    """
    import bb.utils

    if "mindate" in parm or "maxdate" in parm:
        pn = d.getVar('PN')
        srcdate = d.getVar('SRCDATE_%s' % pn) or d.getVar('SRCDATE')
        if srcdate == "now":
            srcdate = d.getVar('DATE')

        if "maxdate" in parm and parm["maxdate"] < srcdate:
            return False, 'is outdated'
        if "mindate" in parm and parm["mindate"] > srcdate:
            return False, 'is predated'

    if "minrev" in parm:
        srcrev = d.getVar('SRCREV')
        if srcrev and srcrev < parm["minrev"]:
            return False, 'applies to later revisions'

    if "maxrev" in parm:
        srcrev = d.getVar('SRCREV')
        if srcrev and srcrev > parm["maxrev"]:
            return False, 'applies to earlier revisions'

    if "rev" in parm:
        # Patch only applies when SRCREV contains the given revision
        srcrev = d.getVar('SRCREV')
        if srcrev and parm["rev"] not in srcrev:
            return False, "doesn't apply to revision"

    if "notrev" in parm:
        # Patch only applies when SRCREV does NOT contain the revision
        srcrev = d.getVar('SRCREV')
        if srcrev and parm["notrev"] in srcrev:
            return False, "doesn't apply to revision"

    if "maxver" in parm:
        pv = d.getVar('PV')
        if bb.utils.vercmp_string_op(pv, parm["maxver"], ">"):
            return False, "applies to earlier version"

    if "minver" in parm:
        pv = d.getVar('PV')
        if bb.utils.vercmp_string_op(pv, parm["minver"], "<"):
            return False, "applies to later version"

    return True, None
diff --git a/meta-xilinx-core/lib/oe/path.py b/meta-xilinx-core/lib/oe/path.py new file mode 100644 index 00000000..5d21cdcb --- /dev/null +++ b/meta-xilinx-core/lib/oe/path.py | |||
| @@ -0,0 +1,349 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import errno | ||
| 8 | import glob | ||
| 9 | import shutil | ||
| 10 | import subprocess | ||
| 11 | import os.path | ||
| 12 | |||
def join(*paths):
    """Concatenate path components with '/' and normalise the result.

    Unlike os.path.join, an absolute right-hand component does not discard
    the components before it.
    """
    combined = "/".join(paths)
    return os.path.normpath(combined)
| 16 | |||
def relative(src, dest):
    """Return a relative path from src to dest.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    ../../tmp/foo/bar

    >>> relative("/usr/bin", "/usr/lib")
    ../lib

    >>> relative("/tmp", "/tmp/foo/bar")
    foo/bar
    """
    return os.path.relpath(dest, src)
| 31 | |||
def make_relative_symlink(path):
    """Rewrite the absolute symlink at 'path' as an equivalent relative one.

    Non-links and already-relative links are left untouched.
    """
    if not os.path.islink(path):
        return
    target = os.readlink(path)
    if not os.path.isabs(target):
        return

    # Walk up from the link until we find a directory that is a prefix of
    # the target, counting how many levels we had to climb.
    ancestor = path
    depth = 0
    while ancestor and not target.startswith(ancestor):
        ancestor = ancestor.rpartition('/')[0]
        depth += 1

    if not ancestor:
        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
        return

    relpath = target.partition(ancestor)[2].strip('/')
    for _ in range(depth - 1):
        relpath = "../" + relpath

    os.remove(path)
    os.symlink(relpath, path)
| 58 | |||
def replace_absolute_symlinks(basedir, d):
    """
    Walk basedir looking for absolute symlinks and replacing them with relative ones.
    The absolute links are assumed to be relative to basedir
    (compared to make_relative_symlink above which tries to compute common ancestors
    using pattern matching instead)
    """
    for walkroot, dirs, files in os.walk(basedir):
        for file in files + dirs:
            path = os.path.join(walkroot, file)
            if not os.path.islink(path):
                continue
            link = os.readlink(path)
            if not os.path.isabs(link):
                continue
            # Directory of the link, expressed relative to basedir, so the
            # link target can be recomputed relative to it
            walkdir = os.path.dirname(path.rpartition(basedir)[2])
            base = os.path.relpath(link, walkdir)
            bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
            os.remove(path)
            os.symlink(base, path)
| 79 | |||
def format_display(path, metadata):
    """Prepare a path for display to the user: prefer the TOPDIR-relative
    form when it is no longer than the absolute path."""
    rel = relative(metadata.getVar("TOPDIR"), path)
    return path if len(rel) > len(path) else rel
| 87 | |||
def copytree(src, dst):
    """Copy the tree at src into dst (created if needed) via a tar pipe,
    preserving permissions, sparseness, xattrs and intra-tree hardlinks."""
    # We could use something like shutil.copytree here but it turns out to
    # to be slow. It takes twice as long copying to an empty directory.
    # If dst already has contents performance can be 15 time slower
    # This way we also preserve hardlinks between files in the tree.

    bb.utils.mkdirhier(dst)
    cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
| 97 | |||
def copyhardlinktree(src, dst):
    """Make a tree of hard links when possible, otherwise copy."""
    bb.utils.mkdirhier(dst)
    if os.path.isdir(src) and not len(os.listdir(src)):
        # Nothing to copy from an empty directory
        return

    # Probe whether hardlinking from src into dst works at all (it fails
    # across filesystems) by test-linking the first regular file found.
    canhard = False
    testfile = None
    for root, dirs, files in os.walk(src):
        if len(files):
            testfile = os.path.join(root, files[0])
            break

    if testfile is not None:
        try:
            os.link(testfile, os.path.join(dst, 'testfile'))
            os.unlink(os.path.join(dst, 'testfile'))
            canhard = True
        except Exception as e:
            bb.debug(2, "Hardlink test failed with " + str(e))

    if (canhard):
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        # Then hardlink the files (including dotfiles) with cp -l
        source = ''
        if os.path.isdir(src):
            if len(glob.glob('%s/.??*' % src)) > 0:
                source = './.??* '
            if len(glob.glob('%s/**' % src)) > 0:
                source += './*'
            s_dir = src
        else:
            source = src
            s_dir = os.getcwd()
        cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst))
        subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT)
    else:
        # Fall back to a plain (tar-based) copy
        copytree(src, dst)
| 138 | |||
def copyhardlink(src, dst):
    """Link dst to src as a hard link, falling back to a plain copy when
    hard linking is not possible (e.g. across filesystems)."""
    try:
        os.link(src, dst)
    except OSError:
        # Cross-device link or unsupported filesystem: copy instead
        shutil.copy(src, dst)
| 146 | |||
def remove(path, recurse=True):
    """Equivalent to rm -f (or rm -rf when recurse is set).

    NOTE: be careful about passing paths that may contain filenames with
    wildcards in them (as opposed to passing an actual wildcarded path) -
    since we use glob.glob() to expand the path. Filenames containing
    square brackets are particularly problematic since the they may not
    actually expand to match the original filename.
    """
    for entry in glob.glob(path):
        try:
            os.unlink(entry)
        except OSError as exc:
            if exc.errno == errno.EISDIR and recurse:
                # Directories need the recursive treatment
                shutil.rmtree(entry)
            elif exc.errno != errno.ENOENT:
                # An already-missing file is fine; anything else is real
                raise
| 164 | |||
def symlink(source, destination, force=False):
    """Create a symbolic link, optionally removing an existing destination.

    An already-existing link that points at the same source is tolerated.
    """
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as e:
        # Only swallow "already exists" when the existing link matches
        if e.errno != errno.EEXIST or os.readlink(destination) != source:
            raise
| 174 | |||
def relsymlink(target, name, force=False):
    """Create a symlink at 'name' pointing at 'target' via a relative path."""
    rel = os.path.relpath(target, os.path.dirname(name))
    symlink(rel, name, force=force)
| 177 | |||
def find(dir, **walkoptions):
    """Recursively yield the path of every file below 'dir'.

    Extra keyword arguments are passed straight through to os.walk().
    """
    for root, _, filenames in os.walk(dir, **walkoptions):
        for filename in filenames:
            yield os.path.join(root, filename)
| 185 | |||
| 186 | |||
| 187 | ## realpath() related functions | ||
def __is_path_below(file, root):
    # 'root' is expected to end with os.path.sep; appending the separator
    # to 'file' makes an exact match (file == root minus the slash) count
    # as being below root.
    return (file + os.path.sep).startswith(root)
| 190 | |||
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks. """
    have_dir = True

    # Resolve component by component, following symlinks as we descend
    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            # Previous component was not a directory, so we cannot descend
            raise OSError(errno.ENOENT, "no such directory %s" % start)

        if d == os.path.pardir: # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            (start, have_dir) = __realpath(os.path.join(start, d),
                                           root, loop_cnt, assume_dir)

        # Resolution must never escape the root sandbox
        assert(__is_path_below(start, root))

    return start
| 214 | |||
def __realpath(file, root, loop_cnt, assume_dir):
    """Resolve symlinks in 'file' (which must lie below 'root'), following
    at most 'loop_cnt' links to guard against symlink loops.

    Returns a (resolved_path, is_directory) tuple.
    Raises OSError(ELOOP) when the link budget is exhausted.
    """
    while os.path.islink(file) and len(file) >= len(root):
        if loop_cnt == 0:
            raise OSError(errno.ELOOP, file)

        loop_cnt -= 1
        target = os.path.normpath(os.readlink(file))

        if not os.path.isabs(target):
            tdir = os.path.dirname(file)
            assert(__is_path_below(tdir, root))
        else:
            # Absolute link targets are interpreted relative to 'root'
            tdir = root

        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)

    try:
        is_dir = os.path.isdir(file)
    except OSError:
        # Bug fix: this read "is_dir = false" (an undefined name) behind a
        # bare "except:"; any failure here would have raised NameError.
        is_dir = False

    return (file, is_dir)
| 237 | |||
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
    """ Returns the canonical path of 'file' with assuming a
    toplevel 'root' directory. When 'use_physdir' is set, all
    preceding path components of 'file' will be resolved first;
    this flag should be set unless it is guaranteed that there is
    no symlink in the path. When 'assume_dir' is not set, missing
    path components will raise an ENOENT error"""

    root = os.path.normpath(root)
    file = os.path.normpath(file)

    if not root.endswith(os.path.sep):
        # letting root end with '/' makes some things easier
        root = root + os.path.sep

    if not __is_path_below(file, root):
        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

    try:
        if use_physdir:
            # Resolve every component (relative to root), not just the leaf
            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
        else:
            file = __realpath(file, root, loop_cnt, assume_dir)[0]
    except OSError as e:
        if e.errno == errno.ELOOP:
            # make ELOOP more readable; without catching it, there will
            # be printed a backtrace with 100s of OSError exceptions
            # else
            raise OSError(errno.ELOOP,
                          "too much recursions while resolving '%s'; loop in '%s'" %
                          (file, e.strerror))

        raise

    return file
| 273 | |||
def is_path_parent(possible_parent, *paths):
    """
    Return True if a path is the parent of another, False otherwise.
    Multiple paths to test can be specified in which case all
    specified test paths must be under the parent in order to
    return True.
    """
    def norm(pth):
        # Compare with a trailing separator so "/foo" doesn't match "/foobar"
        out = os.path.abspath(pth)
        if not out.endswith(os.sep):
            out += os.sep
        return out

    if not paths:
        return False
    parent = norm(possible_parent)
    return all(norm(p).startswith(parent) for p in paths)
| 295 | |||
def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
    """Search a search path for pathname, supporting wildcards.

    Return all paths in the specific search path matching the wildcard pattern
    in pathname, returning only the first encountered for each file. If
    candidates is True, information on all potential candidate paths are
    included.
    """
    search_dirs = (path or os.environ.get('PATH', os.defpath)).split(':')
    if reverse:
        search_dirs.reverse()

    seen = set()
    found = []
    for index, directory in enumerate(search_dirs):
        if not os.path.isabs(directory):
            directory = os.path.abspath(directory)

        for match in sorted(glob.glob(os.path.join(directory, pathname))):
            if not os.access(match, mode):
                continue
            rel = os.path.relpath(match, directory)
            if rel in seen:
                # Only the first hit per relative name counts
                continue
            seen.add(rel)
            if candidates:
                # Record every directory searched up to (and including) this one
                found.append((match, [os.path.join(p, rel) for p in search_dirs[:index + 1]]))
            else:
                found.append(match)

    return found
| 328 | |||
def canonicalize(paths, sep=','):
    """Given a string with paths (separated by commas by default), expand
    each path using os.path.realpath() and return the resulting paths as a
    string (separated using the same separator a the original string).
    """
    canonical = []
    for entry in (paths or '').split(sep):
        if '$' in entry:
            # Skip unexpanded bitbake variable references: realpath() would
            # otherwise turn them into paths under the current directory
            # instead of the empty strings the shell would produce.
            continue
        # Keep a trailing slash, as the paths may be used as prefixes in
        # string compares later on, where the slash matters.
        suffix = '/' if entry.endswith('/') else ''
        canonical.append(os.path.realpath(entry) + suffix)

    return sep.join(canonical)
diff --git a/meta-xilinx-core/lib/oe/prservice.py b/meta-xilinx-core/lib/oe/prservice.py new file mode 100644 index 00000000..c41242c8 --- /dev/null +++ b/meta-xilinx-core/lib/oe/prservice.py | |||
| @@ -0,0 +1,127 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
def prserv_make_conn(d, check = False):
    """Connect to the PR service named by PRSERV_HOST ("host:port").

    When 'check' is set, ping the service and treat an unanswered ping as a
    failure. Any failure aborts the build via bb.fatal().
    """
    import prserv.serv
    host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
    try:
        conn = None
        conn = prserv.serv.connect(host_params[0], int(host_params[1]))
        if check:
            if not conn.ping():
                raise Exception('service not available')
    except Exception as exc:
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))

    return conn
| 20 | |||
def prserv_dump_db(d):
    """Export the remote PR service database.

    Returns the exported data, or None when no network PR service is
    configured or the connection could not be made.
    """
    if not d.getVar('PRSERV_HOST'):
        bb.error("Not using network based PR service")
        return None

    conn = prserv_make_conn(d)
    if conn is None:
        bb.error("Making connection failed to remote PR service")
        return None

    #dump db
    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
    opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
    # NOTE: rebinds 'd' from the datastore to the exported data
    d = conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
    conn.close()
    return d
| 39 | |||
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
    """Import PRAUTO$version$pkgarch$checksum entries from the datastore into
    the remote PR service, optionally filtered by version/pkgarch/checksum.

    Returns the list of imported (version, pkgarch, checksum, value) tuples,
    or None when no PR service is configured or reachable.
    """
    if not d.getVar('PRSERV_HOST'):
        bb.error("Not using network based PR service")
        return None

    conn = prserv_make_conn(d)
    if conn is None:
        bb.error("Making connection failed to remote PR service")
        return None
    #get the entry values
    imported = []
    prefix = "PRAUTO$"
    for v in d.keys():
        if v.startswith(prefix):
            (remain, sep, checksum) = v.rpartition('$')
            (remain, sep, pkgarch) = remain.rpartition('$')
            (remain, sep, version) = remain.rpartition('$')
            # Skip keys that don't match PRAUTO$<version>$<pkgarch>$<checksum>
            # exactly, and keys excluded by the caller's filters
            if (remain + '$' != prefix) or \
               (filter_version and filter_version != version) or \
               (filter_pkgarch and filter_pkgarch != pkgarch) or \
               (filter_checksum and filter_checksum != checksum):
                continue
            try:
                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
            except BaseException as exc:
                # Bug fix: bb.debug() requires a log level as its first
                # argument; the original passed only the message string.
                bb.debug(1, "Not valid value of %s:%s" % (v, str(exc)))
                continue
            ret = conn.importone(version, pkgarch, checksum, value)
            if ret != value:
                bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
            else:
                imported.append((version, pkgarch, checksum, value))
    conn.close()
    return imported
| 74 | |||
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
    """Append a PR database dump to PRSERV_DUMPFILE (under PRSERV_DUMPDIR).

    metainfo -- table/column description to emit as comments, or None
    datainfo -- list of {version, pkgarch, checksum, value} rows, or None
    lockdown -- when true, also write PRSERV_LOCKDOWN = "1"
    nomax    -- when true, skip the per-pkgarch maximum PRAUTO_ lines
    """
    import bb.utils
    #initilize the output file
    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
    df = d.getVar('PRSERV_DUMPFILE')
    #write data
    with open(df, "a") as f, bb.utils.fileslocked(["%s.lock" % df]) as locks:
        if metainfo:
            #dump column info
            f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
            f.write("#Table: %s\n" % metainfo['tbl_name'])
            f.write("#Columns:\n")
            f.write("#name \t type \t notn \t dflt \t pk\n")
            f.write("#----------\t --------\t --------\t --------\t ----\n")
            for i in range(len(metainfo['col_info'])):
                f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
                        (metainfo['col_info'][i]['name'],
                         metainfo['col_info'][i]['type'],
                         metainfo['col_info'][i]['notnull'],
                         metainfo['col_info'][i]['dflt_value'],
                         metainfo['col_info'][i]['pk']))
            f.write("\n")

        if lockdown:
            f.write("PRSERV_LOCKDOWN = \"1\"\n\n")

        if datainfo:
            # idx remembers, per package arch, the row holding the largest value
            idx = {}
            for i in range(len(datainfo)):
                pkgarch = datainfo[i]['pkgarch']
                value = datainfo[i]['value']
                if pkgarch not in idx:
                    idx[pkgarch] = i
                elif value > datainfo[idx[pkgarch]]['value']:
                    idx[pkgarch] = i
                f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
                        (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
            if not nomax:
                # One summary line per package arch with its maximum PR value
                for i in idx:
                    f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
| 115 | |||
def prserv_check_avail(d):
    """Validate PRSERV_HOST ("host:port") and verify that the PR service at
    that address answers a ping; aborts via bb.fatal() on a malformed value.
    """
    host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
    try:
        if len(host_params) != 2:
            raise TypeError
        else:
            int(host_params[1])
    except (TypeError, ValueError):
        # Bug fix: int() raises ValueError for a non-numeric port, which the
        # original TypeError-only handler let escape as an unhandled
        # exception instead of the friendly fatal message below.
        bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
    else:
        conn = prserv_make_conn(d, True)
        conn.close()
diff --git a/meta-xilinx-core/lib/oe/qa.py b/meta-xilinx-core/lib/oe/qa.py new file mode 100644 index 00000000..f8ae3c74 --- /dev/null +++ b/meta-xilinx-core/lib/oe/qa.py | |||
| @@ -0,0 +1,238 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import os, struct, mmap | ||
| 8 | |||
class NotELFFileError(Exception):
    """Raised when a file inspected by ELFFile is not a valid ELF binary."""
    pass
| 11 | |||
| 12 | class ELFFile: | ||
| 13 | EI_NIDENT = 16 | ||
| 14 | |||
| 15 | EI_CLASS = 4 | ||
| 16 | EI_DATA = 5 | ||
| 17 | EI_VERSION = 6 | ||
| 18 | EI_OSABI = 7 | ||
| 19 | EI_ABIVERSION = 8 | ||
| 20 | |||
| 21 | E_MACHINE = 0x12 | ||
| 22 | |||
| 23 | # possible values for EI_CLASS | ||
| 24 | ELFCLASSNONE = 0 | ||
| 25 | ELFCLASS32 = 1 | ||
| 26 | ELFCLASS64 = 2 | ||
| 27 | |||
| 28 | # possible value for EI_VERSION | ||
| 29 | EV_CURRENT = 1 | ||
| 30 | |||
| 31 | # possible values for EI_DATA | ||
| 32 | EI_DATA_NONE = 0 | ||
| 33 | EI_DATA_LSB = 1 | ||
| 34 | EI_DATA_MSB = 2 | ||
| 35 | |||
| 36 | PT_INTERP = 3 | ||
| 37 | |||
    def my_assert(self, expectation, result):
        # Raise NotELFFileError unless the byte read from the file matches
        # what a valid ELF header must contain at that position.
        if not expectation == result:
            #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
            raise NotELFFileError("%s is not an ELF" % self.name)
| 42 | |||
    def __init__(self, name):
        """Record the path to inspect; call open() to actually read it."""
        self.name = name          # path of the candidate ELF file
        self.objdump_output = {}  # cache of objdump output, keyed by command
        self.data = None          # mmap of the file contents, set by open()
| 47 | |||
| 48 | # Context Manager functions to close the mmap explicitly | ||
    def __enter__(self):
        """Enter a 'with' block; the caller is expected to call open() itself."""
        return self
| 51 | |||
    def __exit__(self, exc_type, exc_value, traceback):
        """Close the mmap when leaving a 'with' block."""
        self.close()
| 54 | |||
    def close(self):
        """Release the mmap created by open(); safe to call if never opened."""
        if self.data:
            self.data.close()
| 58 | |||
    def open(self):
        """Memory-map the file and validate its ELF identification header.

        Sets self.data, self.bits (32 or 64) and self.endian.
        Raises NotELFFileError when the file is empty or not a valid ELF.
        """
        with open(self.name, "rb") as f:
            try:
                self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            except ValueError:
                # This means the file is empty
                raise NotELFFileError("%s is empty" % self.name)

        # Check the file has the minimum number of ELF table entries
        if len(self.data) < ELFFile.EI_NIDENT + 4:
            raise NotELFFileError("%s is not an ELF" % self.name)

        # ELF header magic: 0x7f 'E' 'L' 'F'
        self.my_assert(self.data[0], 0x7f)
        self.my_assert(self.data[1], ord('E'))
        self.my_assert(self.data[2], ord('L'))
        self.my_assert(self.data[3], ord('F'))
        if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
            self.bits = 32
        elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
            self.bits = 64
        else:
            # Not 32-bit or 64.. lets assert
            raise NotELFFileError("ELF but not 32 or 64 bit.")
        self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT)

        self.endian = self.data[ELFFile.EI_DATA]
        if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB):
            raise NotELFFileError("Unexpected EI_DATA %x" % self.endian)
| 88 | |||
    def osAbi(self):
        """Return the EI_OSABI identification byte from the ELF header."""
        return self.data[ELFFile.EI_OSABI]
| 91 | |||
    def abiVersion(self):
        """Return the EI_ABIVERSION byte from the ELF header."""
        return self.data[ELFFile.EI_ABIVERSION]
| 94 | |||
    def abiSize(self):
        """Return the word size of the binary (32 or 64), as set by open()."""
        return self.bits
| 97 | |||
    def isLittleEndian(self):
        """Return True when EI_DATA marks the file as little-endian."""
        return self.endian == ELFFile.EI_DATA_LSB
| 100 | |||
    def isBigEndian(self):
        # True when EI_DATA indicates big-endian data encoding
        return self.endian == ELFFile.EI_DATA_MSB
| 103 | |||
    def getStructEndian(self):
        # struct-module byte-order prefix ("<"/">") matching the file's EI_DATA
        return {ELFFile.EI_DATA_LSB: "<",
                ELFFile.EI_DATA_MSB: ">"}[self.endian]
| 107 | |||
    def getShort(self, offset):
        # Read an unsigned 16-bit value at offset, honouring file endianness
        return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0]
| 110 | |||
    def getWord(self, offset):
        # Read a 32-bit value at offset ("i" is signed; NOTE(review): ELF
        # words are unsigned — current callers only compare the result, so
        # this looks benign, but confirm before reusing for large values)
        return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0]
| 113 | |||
| 114 | def isDynamic(self): | ||
| 115 | """ | ||
| 116 | Return True if there is a .interp segment (therefore dynamically | ||
| 117 | linked), otherwise False (statically linked). | ||
| 118 | """ | ||
| 119 | offset = self.getWord(self.bits == 32 and 0x1C or 0x20) | ||
| 120 | size = self.getShort(self.bits == 32 and 0x2A or 0x36) | ||
| 121 | count = self.getShort(self.bits == 32 and 0x2C or 0x38) | ||
| 122 | |||
| 123 | for i in range(0, count): | ||
| 124 | p_type = self.getWord(offset + i * size) | ||
| 125 | if p_type == ELFFile.PT_INTERP: | ||
| 126 | return True | ||
| 127 | return False | ||
| 128 | |||
    def machine(self):
        """
        We know the endian stored in self.endian and we
        know the position
        """
        # e_machine is a 16-bit field at fixed offset ELFFile.E_MACHINE
        return self.getShort(ELFFile.E_MACHINE)
| 135 | |||
    def set_objdump(self, cmd, output):
        # Pre-populate the cache consulted by run_objdump() for this flag
        self.objdump_output[cmd] = output
| 138 | |||
| 139 | def run_objdump(self, cmd, d): | ||
| 140 | import bb.process | ||
| 141 | import sys | ||
| 142 | |||
| 143 | if cmd in self.objdump_output: | ||
| 144 | return self.objdump_output[cmd] | ||
| 145 | |||
| 146 | objdump = d.getVar('OBJDUMP') | ||
| 147 | |||
| 148 | env = os.environ.copy() | ||
| 149 | env["LC_ALL"] = "C" | ||
| 150 | env["PATH"] = d.getVar('PATH') | ||
| 151 | |||
| 152 | try: | ||
| 153 | bb.note("%s %s %s" % (objdump, cmd, self.name)) | ||
| 154 | self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0] | ||
| 155 | return self.objdump_output[cmd] | ||
| 156 | except Exception as e: | ||
| 157 | bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e)) | ||
| 158 | return "" | ||
| 159 | |||
def elf_machine_to_string(machine):
    """
    Return the name of a given ELF e_machine field or the hex value as a string
    if it isn't recognised.
    """
    machines = {
        0x00: "Unset",
        0x02: "SPARC",
        0x03: "x86",
        0x08: "MIPS",
        0x14: "PowerPC",
        0x28: "ARM",
        0x2A: "SuperH",
        0x32: "IA-64",
        0x3E: "x86-64",
        0xB7: "AArch64",
        0xF7: "BPF"
    }
    try:
        return machines[machine]
    except (KeyError, TypeError):
        # KeyError: unrecognised machine id; TypeError: unhashable input.
        # (Previously a bare except, which would also have hidden real bugs.)
        return "Unknown (%s)" % repr(machine)
| 181 | |||
def write_error(type, error, d):
    """Append a QA issue record to the file named by QA_LOGFILE, if set.

    Each record is one line of the form "<P>: <error> [<type>]"; nothing is
    written when QA_LOGFILE is unset.
    """
    logfile = d.getVar('QA_LOGFILE')
    if not logfile:
        return
    recipe = d.getVar('P')
    with open(logfile, "a+") as logf:
        logf.write("%s: %s [%s]\n" % (recipe, error, type))
| 188 | |||
def handle_error(error_class, error_msg, d):
    """Report a QA issue at the severity configured for its class.

    ERROR_QA classes are logged, reported via bb.error and flagged in
    QA_ERRORS_FOUND (returns False); WARN_QA classes are logged and warned;
    everything else is just noted. Returns True unless it was an error.
    """
    message = "QA Issue: %s [%s]" % (error_msg, error_class)
    if error_class in (d.getVar("ERROR_QA") or "").split():
        write_error(error_class, error_msg, d)
        bb.error(message)
        # Recorded so exit_if_errors() can fail the task later
        d.setVar("QA_ERRORS_FOUND", "True")
        return False
    if error_class in (d.getVar("WARN_QA") or "").split():
        write_error(error_class, error_msg, d)
        bb.warn(message)
    else:
        bb.note(message)
    return True
| 201 | |||
def add_message(messages, section, new_msg):
    """Record new_msg under section in the messages dict.

    Messages for the same section accumulate, separated by newlines.
    """
    if section in messages:
        messages[section] = messages[section] + "\n" + new_msg
    else:
        messages[section] = new_msg
| 207 | |||
def exit_with_message_if_errors(message, d):
    """Abort via bb.fatal(message) if QA_ERRORS_FOUND was set to a true
    value (see handle_error); otherwise do nothing."""
    if bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False):
        bb.fatal(message)
| 212 | |||
def exit_if_errors(d):
    # Convenience wrapper: fail the task with a fixed message when any fatal
    # QA errors were recorded earlier via handle_error()
    exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
| 215 | |||
def check_upstream_status(fullpath):
    """Verify that the patch file at fullpath carries a well-formed
    Upstream-Status tag.

    Returns None when a strictly valid tag is present, otherwise a
    human-readable message describing whether the tag is malformed or
    missing entirely.
    """
    import re
    # Loose match: any line mentioning "upstream ... status" in any case
    loose_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
    # Strict match: the exact tag format required by the style guide
    strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
    guidelines = "https://docs.yoctoproject.org/contributor-guide/recipe-style-guide.html#patch-upstream-status"

    with open(fullpath, encoding='utf-8', errors='ignore') as patchfile:
        content = patchfile.read()

    if strict_status_re.search(content):
        return None
    loose_match = loose_status_re.search(content)
    if loose_match:
        return "Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, loose_match.group(0))
    return "Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines)
| 232 | |||
if __name__ == "__main__":
    import sys

    # Ad-hoc CLI for manual testing: print whether the ELF file named by
    # the first argument is dynamically linked
    with ELFFile(sys.argv[1]) as elf:
        elf.open()
        print(elf.isDynamic())
diff --git a/meta-xilinx-core/lib/oe/recipeutils.py b/meta-xilinx-core/lib/oe/recipeutils.py new file mode 100644 index 00000000..de1fbdd3 --- /dev/null +++ b/meta-xilinx-core/lib/oe/recipeutils.py | |||
| @@ -0,0 +1,1185 @@ | |||
| 1 | # Utility functions for reading and modifying recipes | ||
| 2 | # | ||
| 3 | # Some code borrowed from the OE layer index | ||
| 4 | # | ||
| 5 | # Copyright (C) 2013-2017 Intel Corporation | ||
| 6 | # | ||
| 7 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 8 | # | ||
| 9 | |||
| 10 | import sys | ||
| 11 | import os | ||
| 12 | import os.path | ||
| 13 | import tempfile | ||
| 14 | import textwrap | ||
| 15 | import difflib | ||
| 16 | from . import utils | ||
| 17 | import shutil | ||
| 18 | import re | ||
| 19 | import fnmatch | ||
| 20 | import glob | ||
| 21 | import bb.tinfoil | ||
| 22 | |||
| 23 | from collections import OrderedDict, defaultdict | ||
| 24 | from bb.utils import vercmp_string | ||
| 25 | |||
# Help us to find places to insert values
recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
# Variables that sometimes are a bit long but shouldn't be wrapped
nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha[0-9]+sum\]']
# Variables whose values are whitespace-separated lists; patch_recipe_lines
# writes these one item per continuation line
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
# Descriptive variables; localise_file_vars uses the first file setting one
# of these as a fallback location for variables not set in any file
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
| 32 | |||
| 33 | |||
def simplify_history(history, d):
    """
    Eliminate any irrelevant events from a variable history
    """
    kept = []
    seen_set = False
    # Walk newest-to-oldest: once a plain 'set' has been seen, any older
    # set/append/prepend operations can no longer affect the value
    for event in reversed(history):
        if 'flag' in event or 'file' not in event:
            continue
        op = event['op']
        if op == 'set':
            if seen_set:
                continue
            seen_set = True
        elif op in ('append', 'prepend', 'postdot', 'predot') and seen_set:
            # Reminder: "append" and "prepend" mean += and =+ respectively, NOT :append / :prepend
            continue
        kept.insert(0, event)
    return kept
| 55 | |||
| 56 | |||
def get_var_files(fn, varlist, d):
    """Find the file in which each of a list of variables is set.
    Note: requires variable history to be enabled when parsing.
    """
    varfiles = {}
    for var in varlist:
        matches = []
        if '[' in var:
            # Varflag syntax e.g. SRC_URI[md5sum]: look for events setting
            # that specific flag on the base variable
            base, flagpart = var.split('[', 1)
            flagname = flagpart.split(']')[0]
            for event in d.varhistory.variable(base):
                if 'file' in event and event.get('flag', '') == flagname:
                    matches.append(event['file'])
        else:
            for event in d.varhistory.variable(var):
                if 'file' in event and 'flag' not in event:
                    matches.append(event['file'])
        # The most recent (last) event wins; None when never set in a file
        varfiles[var] = matches[-1] if matches else None

    return varfiles
| 83 | |||
| 84 | |||
def split_var_value(value, assignment=True):
    """
    Split a space-separated variable's value into a list of items,
    taking into account that some of the items might be made up of
    expressions containing spaces that should not be split.
    Parameters:
        value:
            The string value to split
        assignment:
            True to assume that the value represents an assignment
            statement, False otherwise. If True, and an assignment
            statement is passed in the first item in
            the returned list will be the part of the assignment
            statement up to and including the opening quote character,
            and the last item will be the closing quote.
    """
    depth = 0       # nesting level of ${...} expressions
    prev = None     # previous (possibly blanked) character, to spot "${"
    items = []
    current = ''
    for ch in value:
        if ch == '{':
            if prev == '$':
                depth += 1
        elif ch == '}':
            depth -= 1
        elif assignment and ch in '"\'' and depth == 0:
            # A quote outside any expression: flush the pending item and
            # emit the quote as an item of its own
            if current:
                items.append(current)
            items.append(ch)
            ch = ''
            current = ''
        elif ch.isspace() and depth == 0:
            # Whitespace outside an expression separates items
            ch = ''
            if current:
                items.append(current)
            current = ''
        current += ch
        prev = ch
    if current:
        items.append(current)

    if not assignment:
        return items

    # Join everything up to and including the opening quote into one item
    found_assign = False
    for idx, item in enumerate(items):
        if '=' in item:
            found_assign = True
        if found_assign and ('"' in item or "'" in item):
            return [' '.join(items[:idx + 1])] + items[idx + 1:]
    return items
| 140 | |||
| 141 | |||
def patch_recipe_lines(fromlines, values, trailing_newline=True):
    """Update or insert variable values into lines from a recipe.
    Note that some manual inspection/intervention may be required
    since this cannot handle all situations.

    Parameters:
        fromlines: list of lines (as from readlines()) of the recipe
        values: dict mapping variable name -> new value; a value may also
                be a tuple of (operator, value) e.g. ('+=', 'x'), or None
                to skip emitting that variable
        trailing_newline: append a newline to each emitted line

    Returns a tuple of (changed, tolines) as from bb.utils.edit_metadata().
    """

    import bb.utils

    if trailing_newline:
        newline = '\n'
    else:
        newline = ''

    # Compiled patterns for variables whose lines must never be wrapped
    nowrap_vars_res = []
    for item in nowrap_vars:
        nowrap_vars_res.append(re.compile('^%s$' % item))

    # Build regexes matching each item of recipe_progression (including
    # override/varflag suffixes) so we can find the right insertion point
    recipe_progression_res = []
    recipe_progression_restrs = []
    for item in recipe_progression:
        if item.endswith('()'):
            key = item[:-2]
        else:
            key = item
        restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
        if item.endswith('()'):
            recipe_progression_restrs.append(restr + '()')
        else:
            recipe_progression_restrs.append(restr)
        recipe_progression_res.append(re.compile('^%s$' % restr))

    def get_recipe_pos(variable):
        # Index of the variable within the conventional recipe ordering,
        # or -1 when it has no defined position
        for i, p in enumerate(recipe_progression_res):
            if p.match(variable):
                return i
        return -1

    # Variables still to be written, ordered by their conventional position
    remainingnames = {}
    for k in values.keys():
        remainingnames[k] = get_recipe_pos(k)
    remainingnames = OrderedDict(sorted(remainingnames.items(), key=lambda x: x[1]))

    # False during the first (discovery) pass, True during the second
    # (modification) pass; read by patch_recipe_varfunc below
    modifying = False

    def outputvalue(name, lines, rewindcomments=False):
        # Append the formatted assignment for values[name] to lines
        if values[name] is None:
            return
        if isinstance(values[name], tuple):
            op, value = values[name]
            if op == '+=' and value.strip() == '':
                return
        else:
            value = values[name]
            op = '='
        rawtext = '%s %s "%s"%s' % (name, op, value, newline)
        addlines = []
        nowrap = False
        for nowrap_re in nowrap_vars_res:
            if nowrap_re.match(name):
                nowrap = True
                break
        if nowrap:
            addlines.append(rawtext)
        elif name in list_vars:
            # List variables get one item per continuation line
            splitvalue = split_var_value(value, assignment=False)
            if len(splitvalue) > 1:
                linesplit = ' \\\n' + (' ' * (len(name) + 4))
                addlines.append('%s %s "%s%s"%s' % (name, op, linesplit.join(splitvalue), linesplit, newline))
            else:
                addlines.append(rawtext)
        else:
            wrapped = textwrap.wrap(rawtext)
            for wrapline in wrapped[:-1]:
                addlines.append('%s \\%s' % (wrapline, newline))
            addlines.append('%s%s' % (wrapped[-1], newline))

        # Split on newlines - this isn't strictly necessary if you are only
        # going to write the output to disk, but if you want to compare it
        # (as patch_recipe_file() will do if patch=True) then it's important.
        addlines = [line for l in addlines for line in l.splitlines(True)]
        if rewindcomments:
            # Ensure we insert the lines before any leading comments
            # (that we'd want to ensure remain leading the next value)
            for i, ln in reversed(list(enumerate(lines))):
                if not ln.startswith('#'):
                    lines[i+1:i+1] = addlines
                    break
            else:
                lines.extend(addlines)
        else:
            lines.extend(addlines)

    existingnames = []
    def patch_recipe_varfunc(varname, origvalue, op, newlines):
        # Callback for bb.utils.edit_metadata(); behaviour depends on the
        # enclosing 'modifying' flag (discovery vs. modification pass)
        if modifying:
            # Insert anything that should come before this variable
            pos = get_recipe_pos(varname)
            for k in list(remainingnames):
                if remainingnames[k] > -1 and pos >= remainingnames[k] and not k in existingnames:
                    outputvalue(k, newlines, rewindcomments=True)
                    del remainingnames[k]
            # Now change this variable, if it needs to be changed
            if varname in existingnames and op in ['+=', '=', '=+']:
                if varname in remainingnames:
                    outputvalue(varname, newlines)
                    del remainingnames[varname]
                return None, None, 0, True
        else:
            if varname in values:
                existingnames.append(varname)
        return origvalue, None, 0, True

    # First run - establish which values we want to set are already in the file
    varlist = [re.escape(item) for item in values.keys()]
    bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc)
    # Second run - actually set everything
    modifying = True
    varlist.extend(recipe_progression_restrs)
    changed, tolines = bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc, match_overrides=True)

    if remainingnames:
        # Variables that found no anchor point get appended at the end
        if tolines and tolines[-1].strip() != '':
            tolines.append('\n')
        for k in remainingnames.keys():
            outputvalue(k, tolines)

    return changed, tolines
| 269 | |||
| 270 | |||
def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
    """Update or insert variable values into a recipe file (assuming you
    have already identified the exact file you want to update.)
    Note that some manual inspection/intervention may be required
    since this cannot handle all situations.
    """

    with open(fn, 'r') as f:
        original_lines = f.readlines()

    _, updated_lines = patch_recipe_lines(original_lines, values)

    if redirect_output:
        # Write the result into the redirect directory instead of in place
        outfile = os.path.join(redirect_output, os.path.basename(fn))
        with open(outfile, 'w') as f:
            f.writelines(updated_lines)
        return None
    if patch:
        # Return a unified diff rather than touching the file
        relfn = os.path.relpath(fn, relpath)
        return difflib.unified_diff(original_lines, updated_lines, 'a/%s' % relfn, 'b/%s' % relfn)
    with open(fn, 'w') as f:
        f.writelines(updated_lines)
    return None
| 295 | |||
| 296 | |||
def localise_file_vars(fn, varfiles, varlist):
    """Given a list of variables and variable history (fetched with get_var_files())
    find where each variable should be set/changed. This handles for example where a
    recipe includes an inc file where variables might be changed - in most cases
    we want to update the inc file when changing the variable value rather than adding
    it to the recipe itself.
    """
    fndir = os.path.dirname(fn) + os.sep

    def _under_recipe_dir(path):
        # True when path lives in the recipe's directory or below it
        return (os.path.dirname(path) + os.sep).startswith(fndir)

    # Fallback file for variables not set anywhere: the first file in/below
    # the recipe directory that sets one of the "meta" variables
    first_meta_file = None
    for metavar in meta_vars:
        candidate = varfiles.get(metavar, None)
        if candidate and _under_recipe_dir(candidate):
            first_meta_file = candidate
            break

    filevars = defaultdict(list)
    for var in varlist:
        target = varfiles[var]
        if not target:
            # Variable isn't in a file; prefer the meta-var file, else the recipe
            target = first_meta_file if first_meta_file else fn
        # Only use files in the same directory as the recipe or below it
        # (this excludes bbclass files and common inc files that wouldn't be
        # appropriate to set a recipe-specific value in)
        if not _under_recipe_dir(target):
            target = fn
        filevars[target].append(var)

    return filevars
| 336 | |||
def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
    """Modify a list of variable values in the specified recipe. Handles inc files if
    used by the recipe.

    Parameters:
        d: recipe datastore (with variable history enabled)
        fn: path to the recipe file
        varvalues: dict mapping variable name -> new value
        patch: return a list of unified diffs instead of writing files
        relpath: base path for diff file names when patch=True
        redirect_output: directory to write modified files to instead of in place
    """
    overrides = d.getVar('OVERRIDES').split(':')
    def override_applicable(hevent):
        # True when every override attached to the history event's operation
        # is currently active in OVERRIDES
        op = hevent['op']
        if '[' in op:
            opoverrides = op.split('[')[1].split(']')[0].split(':')
            for opoverride in opoverrides:
                if not opoverride in overrides:
                    return False
        return True

    varlist = varvalues.keys()
    fn = os.path.abspath(fn)
    varfiles = get_var_files(fn, varlist, d)
    # Decide which file (recipe or inc) each variable should be changed in
    locs = localise_file_vars(fn, varfiles, varlist)
    patches = []
    for f,v in locs.items():
        vals = {k: varvalues[k] for k in v}
        f = os.path.abspath(f)
        if f == fn:
            extravals = {}
            for var, value in vals.items():
                if var in list_vars:
                    history = simplify_history(d.varhistory.variable(var), d)
                    recipe_set = False
                    for event in history:
                        if os.path.abspath(event['file']) == fn:
                            if event['op'] == 'set':
                                recipe_set = True
                    if not recipe_set:
                        # The recipe itself never plainly sets this variable;
                        # strip out contributions from other files/overrides
                        # and turn the remainder into a += in the recipe
                        for event in history:
                            if event['op'].startswith(':remove'):
                                continue
                            if not override_applicable(event):
                                continue
                            newvalue = value.replace(event['detail'], '')
                            if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith(':'):
                                # The recipe's own :append/:prepend no longer
                                # contributes; mark it for removal (value None)
                                op = event['op'].replace('[', ':').replace(']', '')
                                extravals[var + op] = None
                            value = newvalue
                        vals[var] = ('+=', value)
            vals.update(extravals)
        patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
        if patch:
            patches.append(patchdata)

    if patch:
        return patches
    else:
        return None
| 390 | |||
| 391 | |||
| 392 | |||
def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=False):
    """Copy (local) recipe files, including both files included via include/require,
    and files referred to in the SRC_URI variable.

    Parameters:
        d: recipe datastore
        tgt_dir: destination directory
        whole_dir: copy the entire directory containing the recipe instead
                   of individual files
        download: run the fetcher so remote files have local copies
        all_variants: also gather files for BBCLASSEXTEND variants

    Returns a tuple of (copied, remotes): paths copied (relative to tgt_dir)
    and paths that lay outside the recipe's metadata directory.
    """
    import bb.fetch2
    import oe.path

    # FIXME need a warning if the unexpanded SRC_URI value contains variable references

    uri_values = []
    localpaths = []
    def fetch_urls(rdata):
        # Collect the local paths from SRC_URI
        srcuri = rdata.getVar('SRC_URI') or ""
        if srcuri not in uri_values:
            fetch = bb.fetch2.Fetch(srcuri.split(), rdata)
            if download:
                fetch.download()
            for pth in fetch.localpaths():
                if pth not in localpaths:
                    localpaths.append(os.path.abspath(pth))
            uri_values.append(srcuri)

    fetch_urls(d)
    if all_variants:
        # Get files for other variants e.g. in the case of a SRC_URI:append
        localdata = bb.data.createCopy(d)
        variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
        if variants:
            # Ensure we handle class-target if we're dealing with one of the variants
            variants.append('target')
            for variant in variants:
                localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
                fetch_urls(localdata)

    # Copy local files to target directory and gather any remote files
    bb_dir = os.path.abspath(os.path.dirname(d.getVar('FILE'))) + os.sep
    remotes = []
    copied = []
    # Need to do this in two steps since we want to check against the absolute path
    includes = [os.path.abspath(path) for path in d.getVar('BBINCLUDED').split() if os.path.exists(path)]
    # We also check this below, but we don't want any items in this list being considered remotes
    includes = [path for path in includes if path.startswith(bb_dir)]
    for path in localpaths + includes:
        # Only import files that are under the meta directory
        if path.startswith(bb_dir):
            if not whole_dir:
                relpath = os.path.relpath(path, bb_dir)
                subdir = os.path.join(tgt_dir, os.path.dirname(relpath))
                if not os.path.exists(subdir):
                    os.makedirs(subdir)
                shutil.copy2(path, os.path.join(tgt_dir, relpath))
                copied.append(relpath)
        else:
            remotes.append(path)
    # Simply copy whole meta dir, if requested
    if whole_dir:
        shutil.copytree(bb_dir, tgt_dir)

    return copied, remotes
| 452 | |||
| 453 | |||
def get_recipe_local_files(d, patches=False, archives=False):
    """Get a list of local files in SRC_URI within a recipe.

    Parameters:
        d: recipe datastore
        patches: include patch files (otherwise they are skipped)
        archives: include archives that would be unpacked

    Returns a dict mapping relative file name (including any subdir=) to
    the file's local path.
    """
    import oe.patch
    uris = (d.getVar('SRC_URI') or "").split()
    fetch = bb.fetch2.Fetch(uris, d)
    # FIXME this list should be factored out somewhere else (such as the
    # fetcher) though note that this only encompasses actual container formats
    # i.e. that can contain multiple files as opposed to those that only
    # contain a compressed stream (i.e. .tar.gz as opposed to just .gz)
    archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
    ret = {}
    for uri in uris:
        if fetch.ud[uri].type == 'file':
            if (not patches and
                    oe.patch.patch_path(uri, fetch, '', expand=False)):
                continue
            # Skip files that are referenced by absolute path
            fname = fetch.ud[uri].basepath
            if os.path.isabs(fname):
                continue
            # Handle subdir=
            subdir = fetch.ud[uri].parm.get('subdir', '')
            if subdir:
                if os.path.isabs(subdir):
                    continue
                fname = os.path.join(subdir, fname)
            localpath = fetch.localpath(uri)
            if not archives:
                # Ignore archives that will be unpacked
                if localpath.endswith(tuple(archive_exts)):
                    unpack = fetch.ud[uri].parm.get('unpack', True)
                    if unpack:
                        continue
            if os.path.isdir(localpath):
                # A directory entry: record every file inside it, relative
                # to the directory's parent (note fname is reused here)
                for root, dirs, files in os.walk(localpath):
                    for fname in files:
                        fileabspath = os.path.join(root,fname)
                        srcdir = os.path.dirname(localpath)
                        ret[os.path.relpath(fileabspath,srcdir)] = fileabspath
            else:
                ret[fname] = localpath
    return ret
| 496 | |||
| 497 | |||
def get_recipe_patches(d):
    """Get a list of the patches included in SRC_URI within a recipe.

    Returns a list of local filesystem paths, one per patch.
    """
    import oe.patch
    # decodeurl() returns (type, host, path, user, pswd, params); only the
    # local path (index 2) is needed, so the unused 'parm' local is gone
    return [bb.fetch.decodeurl(patch)[2]
            for patch in oe.patch.src_patches(d, expand=False)]
| 507 | |||
| 508 | |||
def get_recipe_patched_files(d):
    """
    Get the list of patches for a recipe along with the files each patch modifies.
    Params:
        d: the datastore for the recipe
    Returns:
        a dict mapping patch file path to a list of tuples of changed files and
        change mode ('A' for add, 'D' for delete or 'M' for modify)
    """
    import oe.patch
    patchedfiles = {}
    for patchentry in oe.patch.src_patches(d, expand=False):
        _, _, patchfile, _, _, parm = bb.fetch.decodeurl(patchentry)
        striplevel = int(parm['striplevel'])
        # Patches may apply inside a subdirectory of S (patchdir=)
        applydir = os.path.join(d.getVar('S'), parm.get('patchdir', ''))
        patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, applydir)
    return patchedfiles
| 526 | |||
| 527 | |||
def validate_pn(pn):
    """Perform validation on a recipe name (PN) for a new recipe.

    Returns an empty string when the name is acceptable, otherwise a
    message describing why it is invalid.
    """
    # Character set check first; everything else assumes a plain name
    if not re.match('^[0-9a-z-.+]+$', pn):
        return 'Recipe name "%s" is invalid: only characters 0-9, a-z, -, + and . are allowed' % pn
    if pn in ('forcevariable', 'append', 'prepend', 'remove'):
        return 'Recipe name "%s" is invalid: is a reserved keyword' % pn
    if pn.startswith('pn-'):
        return 'Recipe name "%s" is invalid: names starting with "pn-" are reserved' % pn
    if pn.endswith(('.bb', '.bbappend', '.bbclass', '.inc', '.conf')):
        return 'Recipe name "%s" is invalid: should be just a name, not a file name' % pn
    return ''
| 540 | |||
| 541 | |||
def get_bbfile_path(d, destdir, extrapathhint=None):
    """
    Determine the correct path for a recipe within a layer
    Parameters:
        d: Recipe-specific datastore
        destdir: destination directory. Can be the path to the base of the layer or a
            partial path somewhere within the layer.
        extrapathhint: a path relative to the base of the layer to try

    Returns the directory the recipe file should go in, or None when no
    candidate path matches the layer's BBFILES patterns.
    """
    import bb.cookerdata

    destdir = os.path.abspath(destdir)
    destlayerdir = find_layerdir(destdir)

    # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
    confdata = d.createCopy()
    confdata.setVar('BBFILES', '')
    confdata.setVar('LAYERDIR', destlayerdir)
    destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
    confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
    pn = d.getVar('PN')

    # Parse BBFILES_DYNAMIC and append to BBFILES
    bbfiles_dynamic = (confdata.getVar('BBFILES_DYNAMIC') or "").split()
    collections = (confdata.getVar('BBFILE_COLLECTIONS') or "").split()
    invalid = []
    for entry in bbfiles_dynamic:
        # Each entry is "<collection>:<fileglob>"; a "!" prefix on the
        # collection inverts the condition
        parts = entry.split(":", 1)
        if len(parts) != 2:
            invalid.append(entry)
            continue
        l, f = parts
        invert = l[0] == "!"
        if invert:
            l = l[1:]
        if (l in collections and not invert) or (l not in collections and invert):
            confdata.appendVar("BBFILES", " " + f)
    if invalid:
        # Malformed BBFILES_DYNAMIC entries: give up rather than guess
        return None
    bbfilespecs = (confdata.getVar('BBFILES') or '').split()
    if destdir == destlayerdir:
        # Look for an existing recipe with the same PN and reuse its directory
        for bbfilespec in bbfilespecs:
            if not bbfilespec.endswith('.bbappend'):
                for match in glob.glob(bbfilespec):
                    splitext = os.path.splitext(os.path.basename(match))
                    if splitext[1] == '.bb':
                        mpn = splitext[0].split('_')[0]
                        if mpn == pn:
                            return os.path.dirname(match)

    # Try to make up a path that matches BBFILES
    # this is a little crude, but better than nothing
    bpn = d.getVar('BPN')
    recipefn = os.path.basename(d.getVar('FILE'))
    pathoptions = [destdir]
    if extrapathhint:
        pathoptions.append(os.path.join(destdir, extrapathhint))
    if destdir == destlayerdir:
        pathoptions.append(os.path.join(destdir, 'recipes-%s' % bpn, bpn))
        pathoptions.append(os.path.join(destdir, 'recipes', bpn))
        pathoptions.append(os.path.join(destdir, bpn))
    elif not destdir.endswith(('/' + pn, '/' + bpn)):
        pathoptions.append(os.path.join(destdir, bpn))
    closepath = ''
    # Accept the first candidate in which a .bb file would match BBFILES
    for pathoption in pathoptions:
        bbfilepath = os.path.join(pathoption, 'test.bb')
        for bbfilespec in bbfilespecs:
            if fnmatch.fnmatchcase(bbfilepath, bbfilespec):
                return pathoption
    return None
| 612 | |||
def get_bbappend_path(d, destlayerdir, wildcardver=False):
    """Determine how a bbappend for a recipe should be named and located within another layer

    Arguments:
        d: recipe datastore (FILE must point at the recipe being appended)
        destlayerdir: base directory of the layer the bbappend should go into
        wildcardver: if True, replace the version part of the filename with a
            '%' wildcard so the bbappend applies to any version of the recipe

    Returns a (appendpath, pathok) tuple: the full path the bbappend should
    be written to, and a boolean indicating whether that path is actually
    covered by the destination layer's BBFILES patterns. Returns
    (None, False) if the layer containing the recipe cannot be determined.
    """

    import bb.cookerdata

    destlayerdir = os.path.abspath(destlayerdir)
    recipefile = d.getVar('FILE')
    recipefn = os.path.splitext(os.path.basename(recipefile))[0]
    if wildcardver and '_' in recipefn:
        # e.g. foo_1.0 -> foo_%
        recipefn = recipefn.split('_', 1)[0] + '_%'
    appendfn = recipefn + '.bbappend'

    # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
    confdata = d.createCopy()
    confdata.setVar('BBFILES', '')
    confdata.setVar('LAYERDIR', destlayerdir)
    destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
    confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)

    origlayerdir = find_layerdir(recipefile)
    if not origlayerdir:
        return (None, False)
    # Now join this to the path where the bbappend is going and check if it is covered by BBFILES
    appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
    closepath = ''
    pathok = True
    for bbfilespec in confdata.getVar('BBFILES').split():
        if fnmatch.fnmatchcase(appendpath, bbfilespec):
            # Our append path works, we're done
            break
        elif bbfilespec.startswith(destlayerdir) and fnmatch.fnmatchcase('test.bbappend', os.path.basename(bbfilespec)):
            # Try to find the longest matching path
            if len(bbfilespec) > len(closepath):
                closepath = bbfilespec
    else:
        # for/else: no BBFILES entry matched the candidate path directly.
        # Unfortunately the bbappend layer and the original recipe's layer don't have the same structure
        if closepath:
            # bbappend layer's layer.conf at least has a spec that picks up .bbappend files
            # Now we just need to substitute out any wildcards
            appendsubdir = os.path.relpath(os.path.dirname(closepath), destlayerdir)
            if 'recipes-*' in appendsubdir:
                # Try to copy this part from the original recipe path
                res = re.search('/recipes-[^/]+/', recipefile)
                if res:
                    appendsubdir = appendsubdir.replace('/recipes-*/', res.group(0))
            # This is crude, but we have to do something
            appendsubdir = appendsubdir.replace('*', recipefn.split('_')[0])
            appendsubdir = appendsubdir.replace('?', 'a')
            appendpath = os.path.join(destlayerdir, appendsubdir, appendfn)
        else:
            pathok = False
    return (appendpath, pathok)
| 665 | |||
| 666 | |||
def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None, params=None, update_original_recipe=False):
    """
    Writes a bbappend file for a recipe
    Parameters:
        rd: data dictionary for the recipe
        destlayerdir: base directory of the layer to place the bbappend in
            (subdirectory path from there will be determined automatically)
        srcfiles: dict of source files to add to SRC_URI, where the key
            is the full path to the file to be added, and the value is a
            dict with following optional keys:
                path: the original filename as it would appear in SRC_URI
                    or None if it isn't already present.
                patchdir: the patchdir parameter
                newname: the name to give to the new added file. None to use
                    the default value: basename(path)
            You may pass None for this parameter if you simply want to specify
            your own content via the extralines parameter.
        install: dict mapping entries in srcfiles to a tuple of two elements:
            install path (*without* ${D} prefix) and permission value (as a
            string, e.g. '0644'). Note: handled entries are popped from this
            dict as they are processed.
        wildcardver: True to use a % wildcard in the bbappend filename, or
            False to make the bbappend specific to the recipe version.
        machine:
            If specified, make the changes in the bbappend specific to this
            machine. This will also cause PACKAGE_ARCH = "${MACHINE_ARCH}"
            to be added to the bbappend.
        extralines:
            Extra lines to add to the bbappend. This may be a dict of name
            value pairs, or simply a list of the lines.
        removevalues:
            Variable values to remove - a dict of names/values.
        redirect_output:
            If specified, redirects writing the output file to the
            specified directory (for dry-run purposes)
        params:
            Parameters to use when adding entries to SRC_URI. If specified,
            should be a list of dicts with the same length as srcfiles.
        update_original_recipe:
            Force to update the original recipe instead of creating/updating
            a bbappend. destlayerdir must contain the original recipe
    Returns:
        (appendpath, destdir) tuple: path of the file written (or that would
        be written in dry-run mode) and the directory source files are
        copied into, or (None, None) on error.
    """

    if not removevalues:
        removevalues = {}

    recipefile = rd.getVar('FILE')
    if update_original_recipe:
        if destlayerdir not in recipefile:
            bb.error("destlayerdir %s doesn't contain the original recipe (%s), cannot update it" % (destlayerdir, recipefile))
            return (None, None)

        appendpath = recipefile
    else:
        # Determine how the bbappend should be named
        appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
        if not appendpath:
            bb.error('Unable to determine layer directory containing %s' % recipefile)
            return (None, None)
        if not pathok:
            bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))

    appenddir = os.path.dirname(appendpath)
    if not redirect_output:
        bb.utils.mkdirhier(appenddir)

    # FIXME check if the bbappend doesn't get overridden by a higher priority layer?

    layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
    if not os.path.abspath(destlayerdir) in layerdirs:
        bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')

    # bbappendlines is a list of (varname, op, value) tuples that still need
    # to be written out; entries are removed as they are merged into an
    # existing file by appendfile_varfunc() below.
    bbappendlines = []
    if extralines:
        if isinstance(extralines, dict):
            for name, value in extralines.items():
                bbappendlines.append((name, '=', value))
        else:
            # Do our best to split it
            for line in extralines:
                if line[-1] == '\n':
                    line = line[:-1]
                splitline = line.split(None, 2)
                if len(splitline) == 3:
                    bbappendlines.append(tuple(splitline))
                else:
                    raise Exception('Invalid extralines value passed')

    # Remove (and return) the pending line for varname, if any
    def popline(varname):
        for i in range(0, len(bbappendlines)):
            if bbappendlines[i][0] == varname:
                line = bbappendlines.pop(i)
                return line
        return None

    # Merge value into an existing pending line for varname, or queue a new one
    def appendline(varname, op, value):
        for i in range(0, len(bbappendlines)):
            item = bbappendlines[i]
            if item[0] == varname:
                bbappendlines[i] = (item[0], item[1], item[2] + ' ' + value)
                break
        else:
            bbappendlines.append((varname, op, value))

    destsubdir = rd.getVar('PN')
    if not update_original_recipe and srcfiles:
        bbappendlines.append(('FILESEXTRAPATHS:prepend', ':=', '${THISDIR}/${PN}:'))

    appendoverride = ''
    if machine:
        bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
        appendoverride = ':%s' % machine
    copyfiles = {}
    if srcfiles:
        instfunclines = []
        for i, (newfile, param) in enumerate(srcfiles.items()):
            srcurientry = None
            if not 'path' in param or not param['path']:
                if 'newname' in param and param['newname']:
                    srcfile = param['newname']
                else:
                    srcfile = os.path.basename(newfile)
                srcurientry = 'file://%s' % srcfile
                oldentry = None
                for uri in rd.getVar('SRC_URI').split():
                    if srcurientry in uri:
                        oldentry = uri
                if params and params[i]:
                    srcurientry = '%s;%s' % (srcurientry, ';'.join('%s=%s' % (k,v) for k,v in params[i].items()))
                # Double-check it's not there already
                # FIXME do we care if the entry is added by another bbappend that might go away?
                if not srcurientry in rd.getVar('SRC_URI').split():
                    if machine:
                        if oldentry:
                            appendline('SRC_URI:remove%s' % appendoverride, '=', ' ' + oldentry)
                        appendline('SRC_URI:append%s' % appendoverride, '=', ' ' + srcurientry)
                    else:
                        if oldentry:
                            if update_original_recipe:
                                removevalues['SRC_URI'] = oldentry
                            else:
                                appendline('SRC_URI:remove', '=', oldentry)
                        appendline('SRC_URI', '+=', srcurientry)
                param['path'] = srcfile
            else:
                srcfile = param['path']
            copyfiles[newfile] = param
            if install:
                # NOTE: install entries are consumed (popped) as they are handled
                institem = install.pop(newfile, None)
                if institem:
                    (destpath, perms) = institem
                    instdestpath = replace_dir_vars(destpath, rd)
                    instdirline = 'install -d ${D}%s' % os.path.dirname(instdestpath)
                    if not instdirline in instfunclines:
                        instfunclines.append(instdirline)
                    instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
        if instfunclines:
            bbappendlines.append(('do_install:append%s()' % appendoverride, '', instfunclines))

    if redirect_output:
        bb.note('Writing append file %s (dry-run)' % appendpath)
        outfile = os.path.join(redirect_output, os.path.basename(appendpath))
        # Only take a copy if the file isn't already there (this function may be called
        # multiple times per operation when we're handling overrides)
        if os.path.exists(appendpath) and not os.path.exists(outfile):
            shutil.copy2(appendpath, outfile)
    elif update_original_recipe:
        outfile = recipefile
    else:
        bb.note('Writing append file %s' % appendpath)
        outfile = appendpath

    if os.path.exists(outfile):
        # Work around lack of nonlocal in python 2
        extvars = {'destsubdir': destsubdir}

        # Callback for bb.utils.edit_metadata(): merges our pending
        # bbappendlines into the variables already set in the existing file
        def appendfile_varfunc(varname, origvalue, op, newlines):
            if varname == 'FILESEXTRAPATHS:prepend':
                if origvalue.startswith('${THISDIR}/'):
                    popline('FILESEXTRAPATHS:prepend')
                    extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
            elif varname == 'PACKAGE_ARCH':
                if machine:
                    popline('PACKAGE_ARCH')
                    return (machine, None, 4, False)
            elif varname.startswith('do_install:append'):
                func = popline(varname)
                if func:
                    # Merge our install commands into the existing function body
                    instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
                    for line in func[2]:
                        if not line in instfunclines:
                            instfunclines.append(line)
                    return (instfunclines, None, 4, False)
            else:
                splitval = split_var_value(origvalue, assignment=False)
                changed = False
                removevar = varname
                if varname in ['SRC_URI', 'SRC_URI:append%s' % appendoverride]:
                    removevar = 'SRC_URI'
                    line = popline(varname)
                    if line:
                        if line[2] not in splitval:
                            splitval.append(line[2])
                            changed = True
                else:
                    line = popline(varname)
                    if line:
                        splitval = [line[2]]
                        changed = True

                if removevar in removevalues:
                    remove = removevalues[removevar]
                    if isinstance(remove, str):
                        if remove in splitval:
                            splitval.remove(remove)
                            changed = True
                    else:
                        for removeitem in remove:
                            if removeitem in splitval:
                                splitval.remove(removeitem)
                                changed = True

                if changed:
                    newvalue = splitval
                    if len(newvalue) == 1:
                        # Ensure it's written out as one line
                        if ':append' in varname:
                            newvalue = ' ' + newvalue[0]
                        else:
                            newvalue = newvalue[0]
                    if not newvalue and (op in ['+=', '.='] or ':append' in varname):
                        # There's no point appending nothing
                        newvalue = None
                    if varname.endswith('()'):
                        indent = 4
                    else:
                        indent = -1
                    return (newvalue, None, indent, True)
            return (origvalue, None, 4, False)

        varnames = [item[0] for item in bbappendlines]
        if removevalues:
            varnames.extend(list(removevalues.keys()))

        with open(outfile, 'r') as f:
            (updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)

        destsubdir = extvars['destsubdir']
    else:
        updated = False
        newlines = []

    # Any bbappendlines still pending were not merged into existing content;
    # append them as new lines at the end
    if bbappendlines:
        for line in bbappendlines:
            if line[0].endswith('()'):
                newlines.append('%s {\n    %s\n}\n' % (line[0], '\n    '.join(line[2])))
            else:
                newlines.append('%s %s "%s"\n\n' % line)
        updated = True

    if updated:
        with open(outfile, 'w') as f:
            f.writelines(newlines)

    if copyfiles:
        if machine:
            destsubdir = os.path.join(destsubdir, machine)
        if redirect_output:
            outdir = redirect_output
        else:
            outdir = appenddir
        for newfile, param in copyfiles.items():
            srcfile = param['path']
            patchdir = param.get('patchdir', ".")

            if patchdir != ".":
                newfile = os.path.join(os.path.split(newfile)[0], patchdir, os.path.split(newfile)[1])
            filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
            if os.path.abspath(newfile) != os.path.abspath(filedest):
                if newfile.startswith(tempfile.gettempdir()):
                    newfiledisp = os.path.basename(newfile)
                else:
                    newfiledisp = newfile
                if redirect_output:
                    bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
                else:
                    bb.note('Copying %s to %s' % (newfiledisp, filedest))
                    bb.utils.mkdirhier(os.path.dirname(filedest))
                    shutil.copyfile(newfile, filedest)

    return (appendpath, os.path.join(appenddir, destsubdir))
| 957 | |||
| 958 | |||
def find_layerdir(fn):
    """Walk upwards from *fn* to find the base directory of the containing layer.

    A layer base is identified by the presence of a conf/layer.conf file.
    Returns the layer directory path, or None if the filesystem root is
    reached without finding one.
    """
    current = os.path.abspath(fn)
    while True:
        if os.path.exists(os.path.join(current, 'conf', 'layer.conf')):
            return current
        current = os.path.dirname(current)
        if current == '/':
            return None
| 971 | |||
| 972 | |||
def replace_dir_vars(path, d):
    """Substitute well-known directory paths in *path* with variable
    references, e.g. /etc becomes ${sysconfdir}.

    Only all-lowercase variables whose names end in 'dir' are considered.
    Where several variables expand to the same value, the one with the
    shortest name claims it.
    """
    path_to_var = {}
    # Visit shortest variable names first and never overwrite an existing
    # mapping, so shorter names take priority for a shared value
    for varname in sorted(d.keys(), key=len):
        if not varname.endswith('dir') or varname.lower() != varname:
            continue
        dirvalue = d.getVar(varname)
        if dirvalue.startswith('/') and '\n' not in dirvalue and dirvalue not in path_to_var:
            path_to_var[dirvalue] = varname
    # Substitute longer (more specific) paths before their shorter prefixes
    for dirvalue in sorted(path_to_var, reverse=True):
        path = path.replace(dirvalue, '${%s}' % path_to_var[dirvalue])
    return path
| 985 | |||
def get_recipe_pv_with_pfx_sfx(pv, uri_type):
    """
    Split PV into its version core plus any prefix and suffix components.

    For git URIs the suffix covers '+git'/'+AUTOINC'-style revision
    decoration; for all other URI types only a leading 'v' or 'r' prefix
    is recognised.

    Returns tuple with pv, prefix and suffix.
    """
    prefix = ''
    suffix = ''

    if uri_type == 'git':
        m = re.match(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+)?)(?P<rev>.*)", pv)
        if m:
            pv = m.group('ver')
            prefix = m.group('pfx')
            suffix = m.group('sfx')
    else:
        m = re.match(r"(?P<pfx>(v|r)?)(?P<ver>.*)", pv)
        if m:
            pv = m.group('ver')
            prefix = m.group('pfx')

    return (pv, prefix, suffix)
| 1011 | |||
def get_recipe_upstream_version(rd):
    """
    Get the upstream version of a recipe using bb.fetch2 methods, with
    support for http, https, ftp and git.

    bb.fetch2 exceptions can be raised:
        FetchError when there is no network access or the upstream site
        doesn't respond.
        NoMethodError when the URI's latest_versionstring method isn't
        implemented.

    Returns a dictionary with version, repository revision, current_version,
    type and datetime. Type can be A for Automatic, M for Manual and U for
    Unknown.
    """
    from bb.fetch2 import decodeurl
    from datetime import datetime

    ru = {}
    ru['current_version'] = rd.getVar('PV')
    ru['version'] = ''
    ru['type'] = 'U'
    ru['datetime'] = ''
    ru['revision'] = ''

    # XXX: If there is no SRC_URI the recipe has no upstream sources, so
    # return the current recipe version so that the upstream version check
    # declares a match.
    src_uris = rd.getVar('SRC_URI')
    if not src_uris:
        ru['version'] = ru['current_version']
        ru['type'] = 'M'
        ru['datetime'] = datetime.now()
        return ru

    # XXX: we suppose that the first entry points to the upstream sources
    src_uri = src_uris.split()[0]
    uri_type, _, _, _, _, _ = decodeurl(src_uri)

    # Strip any 'v'/'r' prefix and git revision suffix from PV
    (pv, pfx, sfx) = get_recipe_pv_with_pfx_sfx(rd.getVar('PV'), uri_type)
    ru['current_version'] = pv

    manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
    if manual_upstream_version:
        # manual tracking of upstream version.
        ru['version'] = manual_upstream_version
        ru['type'] = 'M'

        manual_upstream_date = rd.getVar("CHECK_DATE")
        if manual_upstream_date:
            date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
        else:
            date = datetime.now()
        ru['datetime'] = date

    elif uri_type == "file":
        # files are always up-to-date
        ru['version'] = pv
        ru['type'] = 'A'
        ru['datetime'] = datetime.now()
    else:
        ud = bb.fetch2.FetchData(src_uri, rd)
        if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
            # Resolve SRCREV first so the comparison below is against the
            # currently-configured revision
            bb.fetch2.get_srcrev(rd)
            revision = ud.method.latest_revision(ud, rd, 'default')
            upversion = pv
            if revision != rd.getVar("SRCREV"):
                upversion = upversion + "-new-commits-available"
        else:
            pupver = ud.method.latest_versionstring(ud, rd)
            (upversion, revision) = pupver

        if upversion:
            ru['version'] = upversion
            ru['type'] = 'A'

        if revision:
            ru['revision'] = revision

        ru['datetime'] = datetime.now()

    return ru
| 1091 | |||
def _get_recipe_upgrade_status(data):
    """Compute the upgrade status tuple for a single parsed recipe.

    Returns (pn, status, current_version, next_version, maintainer,
    revision, no_upgrade_reason); status is one of UPDATE, MATCH,
    UNKNOWN, KNOWN_BROKEN or UNKNOWN_BROKEN.
    """
    uv = get_recipe_upstream_version(data)

    pn = data.getVar('PN')
    cur_ver = uv['current_version']
    upstream_version_unknown = data.getVar('UPSTREAM_VERSION_UNKNOWN')

    if not uv['version']:
        # No upstream version could be determined at all
        status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
    else:
        # 'verdict' avoids shadowing the cmp builtin name
        verdict = vercmp_string(uv['current_version'], uv['version'])
        if verdict == -1:
            status = "KNOWN_BROKEN" if upstream_version_unknown else "UPDATE"
        elif verdict == 0:
            status = "KNOWN_BROKEN" if upstream_version_unknown else "MATCH"
        else:
            status = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"

    next_ver = uv['version'] or "N/A"
    revision = uv['revision'] or "N/A"
    maintainer = data.getVar('RECIPE_MAINTAINER')
    no_upgrade_reason = data.getVar('RECIPE_NO_UPDATE_REASON')

    return (pn, status, cur_ver, next_ver, maintainer, revision, no_upgrade_reason)
| 1116 | |||
def get_recipe_upgrade_status(recipes=None):
    """
    Determine the upstream upgrade status of one or more recipes.

    recipes: optional list of recipe filenames (absolute paths) or recipe
        names to check; if not specified, all recipe files known to
        bitbake are checked.

    Returns an iterable of (pn, status, current_version, next_version,
    maintainer, revision, no_upgrade_reason) tuples as produced by
    _get_recipe_upgrade_status().
    """
    pkgs_list = []
    data_copy_list = []
    # Variables needed by the upstream version check; they are copied into a
    # fresh plain datastore per recipe so the check can run in worker
    # processes without the full cooker data
    copy_vars = ('SRC_URI',
                 'PV',
                 'DL_DIR',
                 'PN',
                 'CACHE',
                 'PERSISTENT_DIR',
                 'BB_URI_HEADREVS',
                 'UPSTREAM_CHECK_COMMITS',
                 'UPSTREAM_CHECK_GITTAGREGEX',
                 'UPSTREAM_CHECK_REGEX',
                 'UPSTREAM_CHECK_URI',
                 'UPSTREAM_VERSION_UNKNOWN',
                 'RECIPE_MAINTAINER',
                 'RECIPE_NO_UPDATE_REASON',
                 'RECIPE_UPSTREAM_VERSION',
                 'RECIPE_UPSTREAM_DATE',
                 'CHECK_DATE',
                 'FETCHCMD_bzr',
                 'FETCHCMD_ccrc',
                 'FETCHCMD_cvs',
                 'FETCHCMD_git',
                 'FETCHCMD_hg',
                 'FETCHCMD_npm',
                 'FETCHCMD_osc',
                 'FETCHCMD_p4',
                 'FETCHCMD_repo',
                 'FETCHCMD_s3',
                 'FETCHCMD_svn',
                 'FETCHCMD_wget',
                )

    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=False)

        if not recipes:
            recipes = tinfoil.all_recipe_files(variants=False)

        for fn in recipes:
            try:
                if fn.startswith("/"):
                    data = tinfoil.parse_recipe_file(fn)
                else:
                    data = tinfoil.parse_recipe(fn)
            except bb.providers.NoProvider:
                bb.note(" No provider for %s" % fn)
                continue

            unreliable = data.getVar('UPSTREAM_CHECK_UNRELIABLE')
            if unreliable == "1":
                # Bug fix: this previously referenced an undefined local
                # 'pn', raising NameError whenever this branch was hit
                bb.note(" Skip package %s as upstream check unreliable" % data.getVar('PN'))
                continue

            # Copy only the variables the check needs into a standalone
            # datastore, including any SRCREV* variables
            data_copy = bb.data.init()
            for var in copy_vars:
                data_copy.setVar(var, data.getVar(var))
            for k in data:
                if k.startswith('SRCREV'):
                    data_copy.setVar(k, data.getVar(k))

            data_copy_list.append(data_copy)

    from concurrent.futures import ProcessPoolExecutor
    with ProcessPoolExecutor(max_workers=utils.cpu_count()) as executor:
        pkgs_list = executor.map(_get_recipe_upgrade_status, data_copy_list)

    return pkgs_list
diff --git a/meta-xilinx-core/lib/oe/reproducible.py b/meta-xilinx-core/lib/oe/reproducible.py new file mode 100644 index 00000000..448befce --- /dev/null +++ b/meta-xilinx-core/lib/oe/reproducible.py | |||
| @@ -0,0 +1,197 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | import os | ||
| 7 | import subprocess | ||
| 8 | import bb | ||
| 9 | |||
| 10 | # For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each | ||
| 11 | # component's build environment. The format is number of seconds since the | ||
| 12 | # system epoch. | ||
| 13 | # | ||
| 14 | # Upstream components (generally) respect this environment variable, | ||
| 15 | # using it in place of the "current" date and time. | ||
| 16 | # See https://reproducible-builds.org/specs/source-date-epoch/ | ||
| 17 | # | ||
| 18 | # The default value of SOURCE_DATE_EPOCH comes from the function | ||
| 19 | # get_source_date_epoch_value which reads from the SDE_FILE, or if the file | ||
| 20 | # is not available will use the fallback of SOURCE_DATE_EPOCH_FALLBACK. | ||
| 21 | # | ||
| 22 | # The SDE_FILE is normally constructed from the function | ||
| 23 | # create_source_date_epoch_stamp which is typically added as a postfuncs to | ||
| 24 | # the do_unpack task. If a recipe does NOT have do_unpack, it should be added | ||
| 25 | # to a task that runs after the source is available and before the | ||
| 26 | # do_deploy_source_date_epoch task is executed. | ||
| 27 | # | ||
# If a recipe wishes to override the default behavior it should set its own
| 29 | # SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task | ||
| 30 | # with recipe-specific functionality to write the appropriate | ||
| 31 | # SOURCE_DATE_EPOCH into the SDE_FILE. | ||
| 32 | # | ||
| 33 | # SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should | ||
| 34 | # be reproducible for anyone who builds the same revision from the same | ||
| 35 | # sources. | ||
| 36 | # | ||
| 37 | # There are 4 ways the create_source_date_epoch_stamp function determines what | ||
| 38 | # becomes SOURCE_DATE_EPOCH: | ||
| 39 | # | ||
| 40 | # 1. Use the value from __source_date_epoch.txt file if this file exists. | ||
| 41 | # This file was most likely created in the previous build by one of the | ||
| 42 | # following methods 2,3,4. | ||
| 43 | # Alternatively, it can be provided by a recipe via SRC_URI. | ||
| 44 | # | ||
| 45 | # If the file does not exist: | ||
| 46 | # | ||
| 47 | # 2. If there is a git checkout, use the last git commit timestamp. | ||
| 48 | # Git does not preserve file timestamps on checkout. | ||
| 49 | # | ||
# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
| 51 | # This works for well-kept repositories distributed via tarball. | ||
| 52 | # | ||
| 53 | # 4. Use the modification time of the youngest file in the source tree, if | ||
| 54 | # there is one. | ||
| 55 | # This will be the newest file from the distribution tarball, if any. | ||
| 56 | # | ||
| 57 | # 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK). | ||
| 58 | # | ||
| 59 | # Once the value is determined, it is stored in the recipe's SDE_FILE. | ||
| 60 | |||
def get_source_date_epoch_from_known_files(d, sourcedir):
    """Return the newest mtime among well-known changelog-style files in
    *sourcedir* (NEWS, ChangeLog, Changelog, CHANGES), or None if none of
    them are present."""
    best_epoch = None
    best_path = None
    for name in ("NEWS", "ChangeLog", "Changelog", "CHANGES"):
        candidate = os.path.join(sourcedir, name)
        if not os.path.isfile(candidate):
            continue
        mtime = int(os.lstat(candidate).st_mtime)
        # More than one of these files may be present; keep the youngest one
        if not best_epoch or mtime > best_epoch:
            best_epoch = mtime
            best_path = candidate
    if best_path:
        bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % best_path)
    return best_epoch
| 76 | |||
def find_git_folder(d, sourcedir):
    """Locate the .git directory for the recipe's checked-out sources.

    Tries the default git fetcher unpack path (WORKDIR/git), then the
    source directory itself, then walks WORKDIR (pruning build output
    directories) in case a subpath or destsuffix was specified. Returns
    the path to the .git directory, or None (with a warning) if nothing
    was found.
    """
    workdir = d.getVar('WORKDIR')

    # First guess: the git fetcher's default unpack path
    candidate = os.path.join(workdir, "git/.git")
    if os.path.isdir(candidate):
        return candidate

    # Second guess: ${S} itself
    candidate = os.path.join(sourcedir, ".git")
    if os.path.isdir(candidate):
        return candidate

    # Perhaps there was a subpath or destsuffix specified; go looking in
    # WORKDIR, skipping directories that are build output rather than sources
    skip = set(["build", "image", "license-destdir", "patches", "pseudo",
                "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
    for root, subdirs, _files in os.walk(workdir, topdown=True):
        subdirs[:] = [name for name in subdirs if name not in skip]
        if '.git' in subdirs:
            return os.path.join(root, ".git")

    bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
    return None
| 101 | |||
def get_source_date_epoch_from_git(d, sourcedir):
    """Return the committer timestamp of the latest git commit in the
    recipe's source repository, or None if the sources are not fetched
    from git or no usable repository/HEAD can be found."""
    src_uri = d.getVar('SRC_URI')
    if "git://" not in src_uri and "gitsm://" not in src_uri:
        return None

    gitpath = find_git_folder(d, sourcedir)
    if not gitpath:
        return None

    # The repository may not have a valid HEAD, e.g. when subdir is used in
    # SRC_URI; treat that as "no git timestamp available"
    head_check = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'],
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if head_check.returncode != 0:
        bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, head_check.stdout.decode('utf-8')))
        return None

    bb.debug(1, "git repository: %s" % gitpath)
    log = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
                         check=True, stdout=subprocess.PIPE)
    return int(log.stdout.decode('utf-8'))
| 121 | |||
def get_source_date_epoch_from_youngest_file(d, sourcedir):
    """Fall back to the newest file modification time under sourcedir.

    Returns the largest integer mtime found, or None when sourcedir is
    WORKDIR itself (i.e. the sources are not from a tarball) or no
    regular files were seen.
    """
    if sourcedir == d.getVar('WORKDIR'):
        # These sources are almost certainly not from a tarball
        return None

    # Do it the hard way: check all files and find the youngest one...
    newest_mtime = None
    newest_path = None
    for root, _dirs, names in os.walk(sourcedir, topdown=True):
        for name in names:
            if name.startswith('.'):
                # Hidden files are skipped
                continue
            if name == "singletask.lock":
                # Ignore externalsrc/devtool lockfile [YOCTO #14921]
                continue
            path = os.path.join(root, name)
            try:
                mtime = int(os.lstat(path).st_mtime)
            except ValueError:
                mtime = 0
            if not newest_mtime or mtime > newest_mtime:
                newest_mtime = mtime
                newest_path = path

    if newest_path:
        bb.debug(1, "Newest file found: %s" % newest_path)
    return newest_mtime
| 149 | |||
def fixed_source_date_epoch(d):
    """Last-resort epoch: SOURCE_DATE_EPOCH_FALLBACK if set, else 0."""
    bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
    fallback = d.getVar('SOURCE_DATE_EPOCH_FALLBACK')
    if not fallback:
        return 0
    bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK")
    return int(fallback)
| 157 | |||
def get_source_date_epoch(d, sourcedir):
    """Determine SOURCE_DATE_EPOCH: git HEAD, else newest file, else fallback."""
    for strategy in (get_source_date_epoch_from_git,
                     get_source_date_epoch_from_youngest_file):
        epoch = strategy(d, sourcedir)
        if epoch:
            return epoch
    # Last resort
    return fixed_source_date_epoch(d)
| 164 | |||
def epochfile_read(epochfile, d):
    """Read SOURCE_DATE_EPOCH from epochfile, returned as a string.

    Falls back to SOURCE_DATE_EPOCH_FALLBACK when the file is missing or
    contains an invalid value.  The result is memoised in the datastore
    so repeated reads of the same file are free.
    """
    cached, cached_file = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
    if cached:
        if cached_file == epochfile:
            return cached
        bb.debug(1, "Epoch file changed from %s to %s" % (cached_file, epochfile))

    epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
    try:
        with open(epochfile, 'r') as f:
            contents = f.read()
            try:
                epoch = int(contents)
            except ValueError:
                bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % contents)
                epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
        bb.debug(1, "SOURCE_DATE_EPOCH: %d" % epoch)
    except FileNotFoundError:
        bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, epoch))

    d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(epoch), epochfile))
    return str(epoch)
| 188 | |||
def epochfile_write(source_date_epoch, epochfile, d):
    """Persist source_date_epoch to epochfile via write-then-rename."""
    bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
    bb.utils.mkdirhier(os.path.dirname(epochfile))

    # Stage into a sibling file and rename so readers never observe a
    # partially written epoch.
    staging = "%s.new" % epochfile
    with open(staging, 'w') as f:
        f.write(str(source_date_epoch))
    os.rename(staging, epochfile)
diff --git a/meta-xilinx-core/lib/oe/rootfs.py b/meta-xilinx-core/lib/oe/rootfs.py new file mode 100644 index 00000000..5abce4ad --- /dev/null +++ b/meta-xilinx-core/lib/oe/rootfs.py | |||
| @@ -0,0 +1,438 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | from abc import ABCMeta, abstractmethod | ||
| 7 | from oe.utils import execute_pre_post_process | ||
| 8 | from oe.package_manager import * | ||
| 9 | from oe.manifest import * | ||
| 10 | import oe.path | ||
| 11 | import shutil | ||
| 12 | import os | ||
| 13 | import subprocess | ||
| 14 | import re | ||
| 15 | |||
class Rootfs(object, metaclass=ABCMeta):
    """
    This is an abstract class. Do not instantiate this directly.

    One subclass exists per package backend (oe.package_manager.*.rootfs);
    the subclass supplies the abstract hooks and create() drives the
    overall rootfs construction sequence.
    """

    def __init__(self, d, progress_reporter=None, logcatcher=None):
        self.d = d
        self.pm = None  # package manager instance, set up by the subclass
        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
        self.deploydir = self.d.getVar('IMGDEPLOYDIR')
        self.progress_reporter = progress_reporter
        self.logcatcher = logcatcher

        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _create(self):
        """Backend-specific rootfs population (package installation)."""
        pass

    @abstractmethod
    def _get_delayed_postinsts(self):
        """Return postinsts deferred to first boot, or None if there are none."""
        pass

    @abstractmethod
    def _save_postinsts(self):
        """Save delayed postinst scripts into the image for first boot."""
        pass

    @abstractmethod
    def _log_check(self):
        """Backend-specific scan of the rootfs log for problems."""
        pass

    def _log_check_common(self, type, match):
        """Scan ${T}/log.do_rootfs for lines matching 'match' and report them.

        type is 'warning' or 'error'; error findings are fatal.  Lines
        matching any exclude regex (built-in, subclass-provided via
        log_check_expected_regexes, or IMAGE_LOG_CHECK_EXCLUDES) are skipped.
        """
        # Ignore any lines containing log_check to avoid recursion, and ignore
        # lines beginning with a + since sh -x may emit code which isn't
        # actually executed, but may contain error messages
        excludes = [ 'log_check', r'^\+' ]
        if hasattr(self, 'log_check_expected_regexes'):
            excludes.extend(self.log_check_expected_regexes)
        # Insert custom log_check excludes
        excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x]
        excludes = [re.compile(x) for x in excludes]
        r = re.compile(match)
        log_path = self.d.expand("${T}/log.do_rootfs")
        messages = []
        with open(log_path, 'r') as log:
            for line in log:
                if self.logcatcher and self.logcatcher.contains(line.rstrip()):
                    continue
                for ee in excludes:
                    m = ee.search(line)
                    if m:
                        break
                if m:
                    continue

                m = r.search(line)
                if m:
                    messages.append('[log_check] %s' % line)
        if messages:
            if len(messages) == 1:
                msg = '1 %s message' % type
            else:
                msg = '%d %s messages' % (len(messages), type)
            msg = '[log_check] %s: found %s in the logfile:\n%s' % \
                (self.d.getVar('PN'), msg, ''.join(messages))
            if type == 'error':
                bb.fatal(msg)
            else:
                bb.warn(msg)

    def _log_check_warn(self):
        self._log_check_common('warning', '^(warn|Warn|WARNING:)')

    def _log_check_error(self):
        # log_check_regex is provided by the backend subclass
        self._log_check_common('error', self.log_check_regex)

    def _insert_feed_uris(self):
        """Write package feed URIs into the image when package-management is enabled."""
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
                self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
                self.d.getVar('PACKAGE_FEED_ARCHS'))


    """
    The _cleanup() method should be used to clean-up stuff that we don't really
    want to end up on target. For example, in the case of RPM, the DB locks.
    The method is called, once, at the end of create() method.
    """
    @abstractmethod
    def _cleanup(self):
        pass

    def _setup_dbg_rootfs(self, package_paths):
        """Build a parallel <rootfs>-dbg tree with -dbg/-src packages installed.

        Only runs when IMAGE_GEN_DEBUGFS == "1".  The original rootfs is
        set aside, a debug rootfs is assembled using the original package
        database entries in package_paths, and the original is restored
        at the end.
        """
        gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
        if gen_debugfs != '1':
            return

        bb.note(" Renaming the original rootfs...")
        try:
            # Best-effort removal of a stale tree from a previous run
            shutil.rmtree(self.image_rootfs + '-orig')
        except:
            pass
        bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')

        bb.note(" Creating debug rootfs...")
        bb.utils.mkdirhier(self.image_rootfs)

        bb.note(" Copying back package database...")
        for path in package_paths:
            bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(path))
            if os.path.isdir(self.image_rootfs + '-orig' + path):
                shutil.copytree(self.image_rootfs + '-orig' + path, self.image_rootfs + path, symlinks=True)
            elif os.path.isfile(self.image_rootfs + '-orig' + path):
                shutil.copyfile(self.image_rootfs + '-orig' + path, self.image_rootfs + path)

        # Copy files located in /usr/lib/debug or /usr/src/debug
        for dir in ["/usr/lib/debug", "/usr/src/debug"]:
            src = self.image_rootfs + '-orig' + dir
            if os.path.exists(src):
                dst = self.image_rootfs + dir
                bb.utils.mkdirhier(os.path.dirname(dst))
                shutil.copytree(src, dst)

        # Copy files with suffix '.debug' or located in '.debug' dir.
        for root, dirs, files in os.walk(self.image_rootfs + '-orig'):
            relative_dir = root[len(self.image_rootfs + '-orig'):]
            for f in files:
                if f.endswith('.debug') or '/.debug' in relative_dir:
                    bb.utils.mkdirhier(self.image_rootfs + relative_dir)
                    shutil.copy(os.path.join(root, f),
                                self.image_rootfs + relative_dir)

        bb.note(" Install complementary '*-dbg' packages...")
        self.pm.install_complementary('*-dbg')

        if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
            bb.note(" Install complementary '*-src' packages...")
            self.pm.install_complementary('*-src')

        """
        Install additional debug packages. Possibility to install additional packages,
        which are not automatically installed as complementary package of
        standard one, e.g. debug package of static libraries.
        """
        extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
        if extra_debug_pkgs:
            bb.note(" Install extra debug packages...")
            self.pm.install(extra_debug_pkgs.split(), True)

        bb.note(" Removing package database...")
        for path in package_paths:
            if os.path.isdir(self.image_rootfs + path):
                shutil.rmtree(self.image_rootfs + path)
            elif os.path.isfile(self.image_rootfs + path):
                os.remove(self.image_rootfs + path)

        bb.note(" Rename debug rootfs...")
        try:
            # Best-effort removal of a stale tree from a previous run
            shutil.rmtree(self.image_rootfs + '-dbg')
        except:
            pass
        bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')

        bb.note(" Restoring original rootfs...")
        bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)

    def _exec_shell_cmd(self, cmd):
        """Run cmd; return None on success or an error description string."""
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))

        return None

    def create(self):
        """Generate the root filesystem.

        Runs the pre-process commands, the backend _create(), post-install
        and post-process commands, then read-only-rootfs checks, devfs
        creation, removal of unneeded packages, feed URIs, ldconfig,
        depmod, cleanup and log checking.
        """
        bb.note("###### Generate rootfs #######")
        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')

        def make_last(command, commands):
            commands = commands.split()
            if command in commands:
                commands.remove(command)
                commands.append(command)
            # BUGFIX: join with spaces (not ""), and the caller must use the
            # returned string -- the original discarded it, so the reordering
            # below never took effect.
            return " ".join(commands)

        # We want this to run as late as possible, in particular after
        # systemd_sysusers_create and set_user_group. Using :append is not enough
        post_process_cmds = make_last("tidy_shadowutils_files", post_process_cmds)
        post_process_cmds = make_last("rootfs_reproducible", post_process_cmds)

        execute_pre_post_process(self.d, pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # call the package manager dependent create method
        self._create()

        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
        bb.utils.mkdirhier(sysconfdir)
        with open(sysconfdir + "/version", "w+") as ver:
            ver.write(self.d.getVar('BUILDNAME') + "\n")

        execute_pre_post_process(self.d, rootfs_post_install_cmds)

        self.pm.run_intercepts()

        execute_pre_post_process(self.d, post_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # A read-only rootfs (without delayed postinsts allowed) must have
        # no pending postinsts at all.
        if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                             True, False, self.d) and \
           not bb.utils.contains("IMAGE_FEATURES",
                                 "read-only-rootfs-delayed-postinsts",
                                 True, False, self.d):
            delayed_postinsts = self._get_delayed_postinsts()
            if delayed_postinsts is not None:
                bb.fatal("The following packages could not be configured "
                         "offline and rootfs is read-only: %s" %
                         delayed_postinsts)

        if self.d.getVar('USE_DEVFS') != "1":
            self._create_devfs()

        self._uninstall_unneeded()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self._insert_feed_uris()

        self._run_ldconfig()

        if self.d.getVar('USE_DEPMOD') != "0":
            self._generate_kernel_module_deps()

        self._cleanup()
        self._log_check()

        if self.progress_reporter:
            self.progress_reporter.next_stage()


    def _uninstall_unneeded(self):
        """Remove packages that should not remain in the final image.

        Drops run-postinsts when nothing is delayed, strips
        ROOTFS_RO_UNNEEDED packages from read-only images, saves any
        delayed postinsts, and removes packaging data when the image has
        no package management.
        """
        # Remove the run-postinsts package if no delayed postinsts are found
        delayed_postinsts = self._get_delayed_postinsts()
        if delayed_postinsts is None:
            if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
                self.pm.remove(["run-postinsts"])

        image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                                        True, False, self.d) and \
                      not bb.utils.contains("IMAGE_FEATURES",
                                            "read-only-rootfs-delayed-postinsts",
                                            True, False, self.d)

        image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')

        if image_rorfs or image_rorfs_force == "1":
            # Remove components that we don't need if it's a read-only rootfs
            unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
            pkgs_installed = image_list_installed_packages(self.d)
            # Make sure update-alternatives is removed last. This is
            # because its database has to available while uninstalling
            # other packages, allowing alternative symlinks of packages
            # to be uninstalled or to be managed correctly otherwise.
            provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
            pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)

            # update-alternatives provider is removed in its own remove()
            # call because all package managers do not guarantee the packages
            # are removed in the order they given in the list (which is
            # passed to the command line). The sorting done earlier is
            # utilized to implement the 2-stage removal.
            if len(pkgs_to_remove) > 1:
                self.pm.remove(pkgs_to_remove[:-1], False)
            if len(pkgs_to_remove) > 0:
                self.pm.remove([pkgs_to_remove[-1]], False)

        if delayed_postinsts:
            self._save_postinsts()
            if image_rorfs:
                bb.warn("There are post install scripts "
                        "in a read-only rootfs")

        post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
        execute_pre_post_process(self.d, post_uninstall_cmds)

        runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
                                              True, False, self.d)
        if not runtime_pkgmanage:
            # Remove the package manager data files
            self.pm.remove_packaging_data()

    def _run_ldconfig(self):
        """Run ldconfig over the rootfs and prune its auxiliary cache when unwanted."""
        if self.d.getVar('LDCONFIGDEPEND'):
            bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X")
            self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
                                  'new', '-v', '-X'])

        image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                                        True, False, self.d)
        ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
                                                 True, False, self.d)
        if image_rorfs or not ldconfig_in_features:
            ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
            if os.path.exists(ldconfig_cache_dir):
                bb.note("Removing ldconfig auxiliary cache...")
                shutil.rmtree(ldconfig_cache_dir)

    def _check_for_kernel_modules(self, modules_dir):
        """Return True if any kernel module (.ko, possibly compressed) exists."""
        for root, dirs, files in os.walk(modules_dir, topdown=True):
            for name in files:
                if name.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst")):
                    return True
        return False

    def _generate_kernel_module_deps(self):
        """Run depmodwrapper for every kernel found in PKGDATA_DIR."""
        modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules')
        # if we don't have any modules don't bother to do the depmod
        if not self._check_for_kernel_modules(modules_dir):
            bb.note("No Kernel Modules found, not running depmod")
            return

        pkgdatadir = self.d.getVar('PKGDATA_DIR')

        # PKGDATA_DIR can include multiple kernels so we run depmod for each
        # one of them.
        for direntry in os.listdir(pkgdatadir):
            match = re.match('(.*)-depmod', direntry)
            if not match:
                continue
            kernel_package_name = match.group(1)

            kernel_abi_ver_file = oe.path.join(pkgdatadir, direntry, kernel_package_name + '-abiversion')
            if not os.path.exists(kernel_abi_ver_file):
                bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)

            with open(kernel_abi_ver_file) as f:
                kernel_ver = f.read().strip(' \n')

            # NOTE: modules_dir is already absolute, so os.path.join discards
            # self.image_rootfs here; the result is modules_dir/kernel_ver.
            versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)

            bb.utils.mkdirhier(versioned_modules_dir)

            bb.note("Running depmodwrapper for %s ..." % versioned_modules_dir)
            if self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver, kernel_package_name]):
                bb.fatal("Kernel modules dependency generation failed")

    """
    Create devfs:
    * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file
    * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, seached
      for in the BBPATH
    If neither are specified then the default name of files/device_table-minimal.txt
    is searched for in the BBPATH (same as the old version.)
    """
    def _create_devfs(self):
        devtable_list = []
        devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
        if devtable is not None:
            devtable_list.append(devtable)
        else:
            devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
            if devtables is None:
                devtables = 'files/device_table-minimal.txt'
            for devtable in devtables.split():
                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))

        for devtable in devtable_list:
            self._exec_shell_cmd(["makedevs", "-r",
                                  self.image_rootfs, "-D", devtable])
| 394 | |||
| 395 | |||
def get_class_for_type(imgtype):
    """Return the PkgRootfs class implementing the given IMAGE_PKGTYPE."""
    import importlib
    backend = importlib.import_module('oe.package_manager.' + imgtype + '.rootfs')
    return backend.PkgRootfs
| 400 | |||
def variable_depends(d, manifest_dir=None):
    """Return the variable dependency list of the selected rootfs backend."""
    backend = get_class_for_type(d.getVar('IMAGE_PKGTYPE'))
    return backend._depends_list()
| 405 | |||
def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
    """Instantiate the IMAGE_PKGTYPE rootfs backend and build the rootfs.

    The process environment is snapshotted beforehand and restored
    afterwards, since backends may mutate os.environ.
    """
    saved_environ = os.environ.copy()

    rootfs_cls = get_class_for_type(d.getVar('IMAGE_PKGTYPE'))
    rootfs_cls(d, manifest_dir, progress_reporter, logcatcher).create()

    os.environ.clear()
    os.environ.update(saved_environ)
| 415 | |||
| 416 | |||
def image_list_installed_packages(d, rootfs_dir=None):
    """Return the list of packages installed in rootfs_dir (IMAGE_ROOTFS by default)."""
    # There's no rootfs for baremetal images
    if bb.data.inherits_class('baremetal-image', d):
        return ""

    rootfs_dir = rootfs_dir or d.getVar('IMAGE_ROOTFS')

    import importlib
    backend = importlib.import_module('oe.package_manager.' + d.getVar('IMAGE_PKGTYPE'))
    return backend.PMPkgsList(d, rootfs_dir).list_pkgs()
| 430 | |||
if __name__ == "__main__":
    """
    We should be able to run this as a standalone script, from outside bitbake
    environment.
    """
    """
    TBD
    """
    # NOTE(review): the standalone entry point is unimplemented (TBD above),
    # so running this module directly is currently a no-op.
diff --git a/meta-xilinx-core/lib/oe/rust.py b/meta-xilinx-core/lib/oe/rust.py new file mode 100644 index 00000000..185553ee --- /dev/null +++ b/meta-xilinx-core/lib/oe/rust.py | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Handle mismatches between `uname -m`-style output and Rust's arch names | ||
def arch_to_rust_arch(arch):
    """Map a `uname -m`-style architecture name to Rust's spelling.

    ppc64le is called powerpc64le by Rust, and bare riscv32/riscv64
    names gain the "gc" extension suffix; anything else passes through
    unchanged.
    """
    renames = {
        "ppc64le": "powerpc64le",
        "riscv32": "riscv32gc",
        "riscv64": "riscv64gc",
    }
    return renames.get(arch, arch)
diff --git a/meta-xilinx-core/lib/oe/sbom.py b/meta-xilinx-core/lib/oe/sbom.py new file mode 100644 index 00000000..fd4b6895 --- /dev/null +++ b/meta-xilinx-core/lib/oe/sbom.py | |||
| @@ -0,0 +1,120 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import collections | ||
| 8 | |||
# Records tying a dependency's SPDX document (and the sha1 it was read
# with) to its recipe object -- DepSource additionally carries the
# specific source file.
DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
| 11 | |||
| 12 | |||
def get_recipe_spdxid(d):
    """SPDX ID for the recipe-level element of the current recipe (PN)."""
    return f'SPDXRef-Recipe-{d.getVar("PN")}'
| 15 | |||
| 16 | |||
def get_download_spdxid(d, idx):
    """SPDX ID for the idx'th fetched download of the current recipe."""
    pn = d.getVar("PN")
    return "SPDXRef-Download-%s-%d" % (pn, idx)
| 19 | |||
| 20 | |||
def get_package_spdxid(pkg):
    """SPDX ID for a runtime package."""
    return f"SPDXRef-Package-{pkg}"
| 23 | |||
| 24 | |||
def get_source_file_spdxid(d, idx):
    """SPDX ID for the idx'th source file of the current recipe."""
    pn = d.getVar("PN")
    return "SPDXRef-SourceFile-%s-%d" % (pn, idx)
| 27 | |||
| 28 | |||
def get_packaged_file_spdxid(pkg, idx):
    """SPDX ID for the idx'th file shipped in package pkg."""
    return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx)
| 31 | |||
| 32 | |||
def get_image_spdxid(img):
    """SPDX ID for an image."""
    return f"SPDXRef-Image-{img}"
| 35 | |||
| 36 | |||
def get_sdk_spdxid(sdk):
    """SPDX ID for an SDK."""
    return f"SPDXRef-SDK-{sdk}"
| 39 | |||
| 40 | |||
def _doc_path_by_namespace(spdx_deploy, arch, doc_namespace):
    """Path of the by-namespace index entry for a document namespace.

    spdx_deploy is a pathlib.Path; '/' in the namespace is flattened
    to '_' to make a single path component.
    """
    return spdx_deploy / "by-namespace" / arch / doc_namespace.replace("/", "_")


def doc_find_by_namespace(spdx_deploy, search_arches, doc_namespace):
    """Find a deployed SPDX document by its namespace.

    Tries each architecture in search_arches in order; returns the first
    existing path, or None.
    """
    # BUGFIX: this module only imports `collections` at file level, so the
    # os.path.exists() call below raised NameError; import os locally
    # (matching the file's local-import style, cf. write_doc).
    import os

    for pkgarch in search_arches:
        p = _doc_path_by_namespace(spdx_deploy, pkgarch, doc_namespace)
        if os.path.exists(p):
            return p
    return None
| 51 | |||
| 52 | |||
def _doc_path_by_hashfn(spdx_deploy, arch, doc_name, hashfn):
    """Path of the by-hash index entry for a document.

    hashfn is a whitespace-separated BB_HASHFILENAME value; the second
    token is used as the hash directory component.
    """
    return (
        spdx_deploy / "by-hash" / arch / hashfn.split()[1] / (doc_name + ".spdx.json")
    )


def doc_find_by_hashfn(spdx_deploy, search_arches, doc_name, hashfn):
    """Find a deployed SPDX document by name and task hash filename.

    Tries each architecture in search_arches in order; returns the first
    existing path, or None.
    """
    # BUGFIX: this module only imports `collections` at file level, so the
    # os.path.exists() call below raised NameError; import os locally
    # (matching the file's local-import style, cf. write_doc).
    import os

    for pkgarch in search_arches:
        p = _doc_path_by_hashfn(spdx_deploy, pkgarch, doc_name, hashfn)
        if os.path.exists(p):
            return p
    return None
| 65 | |||
| 66 | |||
def doc_path(spdx_deploy, doc_name, arch, subdir):
    """Primary deploy path of an SPDX document (spdx_deploy is a pathlib.Path)."""
    filename = doc_name + ".spdx.json"
    return spdx_deploy / arch / subdir / filename
| 69 | |||
| 70 | |||
def write_doc(d, spdx_doc, arch, subdir, spdx_deploy=None, indent=None):
    """Serialise spdx_doc under <spdx_deploy>/<arch>/<subdir>/ and index it.

    Symlinks to the written document are created in the by-namespace and
    by-hash indexes so other recipes can locate it.  Returns the sha1 of
    the written JSON, as produced by spdx_doc.to_json().
    """
    # BUGFIX: this module only imports `collections` at file level, so the
    # os.path.relpath() calls below raised NameError; import os locally,
    # alongside the existing local pathlib import.
    import os
    from pathlib import Path

    if spdx_deploy is None:
        spdx_deploy = Path(d.getVar("SPDXDEPLOY"))

    dest = doc_path(spdx_deploy, spdx_doc.name, arch, subdir)
    dest.parent.mkdir(exist_ok=True, parents=True)
    with dest.open("wb") as f:
        doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)

    # Index by document namespace (relative symlink, so the deploy dir
    # can be relocated)
    l = _doc_path_by_namespace(spdx_deploy, arch, spdx_doc.documentNamespace)
    l.parent.mkdir(exist_ok=True, parents=True)
    l.symlink_to(os.path.relpath(dest, l.parent))

    # Index by task hash filename
    l = _doc_path_by_hashfn(
        spdx_deploy, arch, spdx_doc.name, d.getVar("BB_HASHFILENAME")
    )
    l.parent.mkdir(exist_ok=True, parents=True)
    l.symlink_to(os.path.relpath(dest, l.parent))

    return doc_sha1
| 93 | |||
| 94 | |||
def read_doc(fn):
    """Load an SPDX document from fn (an open binary file or a pathlib.Path).

    Returns a (document, sha1-hexdigest) tuple, where the digest covers
    the raw bytes of the document.
    """
    import hashlib
    import io
    import contextlib
    import oe.spdx

    @contextlib.contextmanager
    def open_input():
        # Accept either an already-open file object or a path-like with .open()
        if isinstance(fn, io.IOBase):
            yield fn
        else:
            with fn.open("rb") as f:
                yield f

    with open_input() as f:
        digest = hashlib.sha1()
        while chunk := f.read(4096):
            digest.update(chunk)

        # Rewind so the JSON parser sees the whole document
        f.seek(0)
        doc = oe.spdx.SPDXDocument.from_json(f)

    return (doc, digest.hexdigest())
diff --git a/meta-xilinx-core/lib/oe/sdk.py b/meta-xilinx-core/lib/oe/sdk.py new file mode 100644 index 00000000..3dc36722 --- /dev/null +++ b/meta-xilinx-core/lib/oe/sdk.py | |||
| @@ -0,0 +1,160 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | from abc import ABCMeta, abstractmethod | ||
| 8 | from oe.utils import execute_pre_post_process | ||
| 9 | from oe.manifest import * | ||
| 10 | from oe.package_manager import * | ||
| 11 | import os | ||
| 12 | import traceback | ||
| 13 | |||
class Sdk(object, metaclass=ABCMeta):
    """Abstract base for SDK population; backends implement _populate().

    On construction the previous SDK_OUTPUT tree is removed, so creating
    an Sdk object has a filesystem side effect.
    """
    def __init__(self, d, manifest_dir):
        self.d = d
        self.sdk_output = self.d.getVar('SDK_OUTPUT')
        # Stored with leading '/' stripped so they can be joined under sdk_output
        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
        self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
        self.sysconfdir = self.d.getVar('sysconfdir').strip('/')

        self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
        self.sdk_host_sysroot = self.sdk_output

        if manifest_dir is None:
            self.manifest_dir = self.d.getVar("SDK_DIR")
        else:
            self.manifest_dir = manifest_dir

        # Start from a clean output tree
        self.remove(self.sdk_output, True)

        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _populate(self):
        pass

    def populate(self):
        """Populate the SDK output tree and run SDK_POSTPROCESS_COMMAND."""
        self.mkdirhier(self.sdk_output)

        # call backend dependent implementation
        self._populate()

        # Don't ship any libGL in the SDK
        self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                         self.d.getVar('libdir_nativesdk').strip('/'),
                         "libGL*"))

        # Fix or remove broken .la files
        self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                         self.d.getVar('libdir_nativesdk').strip('/'),
                         "*.la"))

        # Link the ld.so.cache file into the hosts filesystem
        link_name = os.path.join(self.sdk_output, self.sdk_native_path,
                                 self.sysconfdir, "ld.so.cache")
        self.mkdirhier(os.path.dirname(link_name))
        os.symlink("/etc/ld.so.cache", link_name)

        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))

    def movefile(self, sourcefile, destdir):
        """Move sourcefile into destdir; a failure is fatal to the build."""
        try:
            # FIXME: this check of movefile's return code to None should be
            # fixed within the function to use only exceptions to signal when
            # something goes wrong
            if (bb.utils.movefile(sourcefile, destdir) == None):
                raise OSError("moving %s to %s failed"
                        %(sourcefile, destdir))
        #FIXME: using umbrella exc catching because bb.utils method raises it
        except Exception as e:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.fatal("unable to place %s in final SDK location" % sourcefile)

    def mkdirhier(self, dirpath):
        """Create dirpath (and parents); a failure is fatal to the build."""
        try:
            bb.utils.mkdirhier(dirpath)
        except OSError as e:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.fatal("cannot make dir for SDK: %s" % dirpath)

    def remove(self, path, recurse=False):
        """Best-effort removal of path (glob accepted); failure only warns."""
        try:
            bb.utils.remove(path, recurse)
        #FIXME: using umbrella exc catching because bb.utils method raises it
        except Exception as e:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.warn("cannot remove SDK dir: %s" % path)

    def install_locales(self, pm):
        """Install SDKIMAGE_LINGUAS binary locales, archive them, then drop the packages."""
        linguas = self.d.getVar("SDKIMAGE_LINGUAS")
        if linguas:
            import fnmatch
            # Install the binary locales
            if linguas == "all":
                pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
            else:
                pm.install(["nativesdk-glibc-binary-localedata-%s.utf-8" % \
                           lang for lang in linguas.split()])
            # Generate a locale archive of them
            target_arch = self.d.getVar('SDK_ARCH')
            rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
            localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
            generate_locale_archive(self.d, rootfs, target_arch, localedir)
            # And now delete the binary locales
            pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
            pm.remove(pkgs)
        else:
            # No linguas so do nothing
            pass
| 111 | |||
| 112 | |||
def sdk_list_installed_packages(d, target, rootfs_dir=None):
    """Return the list of packages installed in an SDK sysroot.

    d          -- BitBake datastore
    target     -- True to inspect the target sysroot below SDK_OUTPUT;
                  anything else inspects the SDK host sysroot (and when
                  target is exactly False, the IPKG target config is
                  pointed at the SDK's own config first)
    rootfs_dir -- explicit rootfs path; derived from SDK_OUTPUT when None

    The package-manager backend is selected at runtime via IMAGE_PKGTYPE.
    """
    if rootfs_dir is None:
        sdk_output = d.getVar('SDK_OUTPUT')
        target_path = d.getVar('SDKTARGETSYSROOT').strip('/')

        # Replaces the obscure "[a, b][target is True]" bool-indexing
        # idiom with an explicit conditional; "is True" is kept so any
        # non-True truthy value still selects the host sysroot.
        if target is True:
            rootfs_dir = os.path.join(sdk_output, target_path)
        else:
            rootfs_dir = sdk_output

    if target is False:
        # List host packages with the SDK's own IPKG configuration.
        ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK")
        d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)

    img_type = d.getVar('IMAGE_PKGTYPE')
    import importlib
    cls = importlib.import_module('oe.package_manager.' + img_type)
    return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
| 128 | |||
def populate_sdk(d, manifest_dir=None):
    """Populate the SDK using the IMAGE_PKGTYPE-specific backend.

    The backend may modify os.environ while populating, so the original
    environment is snapshotted up front and restored in a finally block —
    previously an exception during populate() would leak the modified
    environment into the rest of the build.
    """
    env_bkp = os.environ.copy()

    img_type = d.getVar('IMAGE_PKGTYPE')
    import importlib
    cls = importlib.import_module('oe.package_manager.' + img_type + '.sdk')
    try:
        cls.PkgSdk(d, manifest_dir).populate()
    finally:
        # Restore the environment even if populate() raised.
        os.environ.clear()
        os.environ.update(env_bkp)
| 139 | |||
def get_extra_sdkinfo(sstate_dir):
    """
    This function is going to be used for generating the target and host manifest files packages of eSDK.
    """
    import math

    tasksizes = {}
    filesizes = {}
    # Walk the sstate tree and account every .tgz archive, both per file
    # and aggregated per task name parsed from the archive filename.
    for dirpath, _, filenames in os.walk(sstate_dir):
        for name in filenames:
            if not name.endswith('.tgz'):
                continue
            # Size in KiB, rounded up.
            size_kb = int(math.ceil(os.path.getsize(os.path.join(dirpath, name)) / 1024))
            # Filename layout: "...:<hash>_<task>[,...]" — pull out <task>.
            task = name.rsplit(':', 1)[1].split('_', 1)[1].split(',')[0]
            tasksizes[task] = tasksizes.get(task, 0) + size_kb
            filesizes[name] = size_kb
    return {'tasksizes': tasksizes, 'filesizes': filesizes}
| 158 | |||
# Module is import-only; running it directly is deliberately a no-op.
if __name__ == "__main__":
    pass
diff --git a/meta-xilinx-core/lib/oe/spdx.py b/meta-xilinx-core/lib/oe/spdx.py new file mode 100644 index 00000000..7aaf2af5 --- /dev/null +++ b/meta-xilinx-core/lib/oe/spdx.py | |||
| @@ -0,0 +1,357 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This library is intended to capture the JSON SPDX specification in a type | ||
| 9 | # safe manner. It is not intended to encode any particular OE specific | ||
| 10 | # behaviors, see the sbom.py for that. | ||
| 11 | # | ||
| 12 | # The documented SPDX spec document doesn't cover the JSON syntax for | ||
| 13 | # particular configuration, which can make it hard to determine what the JSON | ||
| 14 | # syntax should be. I've found it is actually much simpler to read the official | ||
| 15 | # SPDX JSON schema which can be found here: https://github.com/spdx/spdx-spec | ||
| 16 | # in schemas/spdx-schema.json | ||
| 17 | # | ||
| 18 | |||
| 19 | import hashlib | ||
| 20 | import itertools | ||
| 21 | import json | ||
| 22 | |||
# SPDX specification version implemented here; used for the default
# spdxVersion value of SPDXDocument below.
SPDX_VERSION = "2.2"
| 24 | |||
| 25 | |||
| 26 | # | ||
| 27 | # The following are the support classes that are used to implement SPDX object | ||
| 28 | # | ||
| 29 | |||
| 30 | class _Property(object): | ||
| 31 | """ | ||
| 32 | A generic SPDX object property. The different types will derive from this | ||
| 33 | class | ||
| 34 | """ | ||
| 35 | |||
| 36 | def __init__(self, *, default=None): | ||
| 37 | self.default = default | ||
| 38 | |||
| 39 | def setdefault(self, dest, name): | ||
| 40 | if self.default is not None: | ||
| 41 | dest.setdefault(name, self.default) | ||
| 42 | |||
| 43 | |||
class _String(_Property):
    """
    A scalar string property for an SPDX object
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def set_property(self, attrs, name):
        # All three accessors simply proxy to the owner's _spdx dict.
        def _get(obj):
            return obj._spdx[name]

        def _set(obj, value):
            obj._spdx[name] = value

        def _del(obj):
            del obj._spdx[name]

        attrs[name] = property(_get, _set, _del)

    def init(self, source):
        # Strings deserialize as-is.
        return source
| 66 | |||
| 67 | |||
class _Object(_Property):
    """
    A scalar SPDX object property of a SPDX object
    """

    def __init__(self, cls, **kwargs):
        super().__init__(**kwargs)
        # SPDX object class instantiated on first access / deserialization.
        self.cls = cls

    def set_property(self, attrs, name):
        def get_helper(obj):
            # Create the nested object on demand so callers can write
            # e.g. "doc.creationInfo.created = ..." without explicit setup.
            if not name in obj._spdx:
                obj._spdx[name] = self.cls()
            return obj._spdx[name]

        def set_helper(obj, value):
            obj._spdx[name] = value

        def del_helper(obj):
            del obj._spdx[name]

        # Fix: wire del_helper into the property so "del obj.<name>" works;
        # previously it was defined but never passed, making deletion raise
        # AttributeError — inconsistent with _String and _ListProperty.
        attrs[name] = property(get_helper, set_helper, del_helper)

    def init(self, source):
        return self.cls(**source)
| 93 | |||
| 94 | |||
class _ListProperty(_Property):
    """
    A list of SPDX properties
    """

    def __init__(self, prop, **kwargs):
        super().__init__(**kwargs)
        # Element property type, used when deserializing list members.
        self.prop = prop

    def set_property(self, attrs, name):
        def _get(obj):
            # Materialize an empty list on first access so callers can
            # append without explicit initialization.
            return obj._spdx.setdefault(name, [])

        def _set(obj, value):
            # Copy into a real list so any iterable can be assigned.
            obj._spdx[name] = list(value)

        def _del(obj):
            del obj._spdx[name]

        attrs[name] = property(_get, _set, _del)

    def init(self, source):
        return [self.prop.init(item) for item in source]
| 120 | |||
| 121 | |||
class _StringList(_ListProperty):
    """
    A list of strings as a property for an SPDX object
    """

    def __init__(self, **kwargs):
        # Delegate to _ListProperty with a string element type.
        super().__init__(_String(), **kwargs)
| 129 | |||
| 130 | |||
class _ObjectList(_ListProperty):
    """
    A list of SPDX objects as a property for an SPDX object
    """

    def __init__(self, cls, **kwargs):
        # Delegate to _ListProperty with a nested-object element type.
        super().__init__(_Object(cls), **kwargs)
| 138 | |||
| 139 | |||
class MetaSPDXObject(type):
    """
    A metaclass that allows properties (anything derived from a _Property
    class) to be defined for a SPDX object
    """
    def __new__(mcls, name, bases, attrs):
        properties = {}

        # Collect every declared _Property and let it replace itself in
        # the class dict with a real Python property backed by _spdx.
        # Iterate over a snapshot since set_property mutates attrs.
        for key, value in list(attrs.items()):
            if isinstance(value, _Property):
                properties[key] = value
                value.set_property(attrs, key)

        attrs["_properties"] = properties

        return super().__new__(mcls, name, bases, attrs)
| 155 | |||
| 156 | |||
class SPDXObject(metaclass=MetaSPDXObject):
    """
    The base SPDX object; all SPDX spec classes must derive from this class
    """
    def __init__(self, **d):
        self._spdx = {}

        # Seed declared defaults first, then overwrite with any values
        # supplied by the caller (e.g. when deserializing from JSON).
        for name, prop in self._properties.items():
            prop.setdefault(self._spdx, name)
            if name in d:
                self._spdx[name] = prop.init(d[name])

    def serializer(self):
        # The backing dict is already JSON-shaped.
        return self._spdx

    def __setattr__(self, name, value):
        # Reject attributes outside the SPDX spec so a typo cannot
        # silently produce an invalid document.
        if name not in self._properties and name != "_spdx":
            raise KeyError("%r is not a valid SPDX property" % name)
        super().__setattr__(name, value)
| 177 | |||
| 178 | # | ||
| 179 | # These are the SPDX objects implemented from the spec. The *only* properties | ||
| 180 | # that can be added to these objects are ones directly specified in the SPDX | ||
| 181 | # spec, however you may add helper functions to make operations easier. | ||
| 182 | # | ||
| 183 | # Defaults should *only* be specified if the SPDX spec says there is a certain | ||
| 184 | # required value for a field (e.g. dataLicense), or if the field is mandatory | ||
| 185 | # and has some sane "this field is unknown" (e.g. "NOASSERTION") | ||
| 186 | # | ||
| 187 | |||
class SPDXAnnotation(SPDXObject):
    """An SPDX annotation: a dated, typed comment from an annotator."""
    annotationDate = _String()
    annotationType = _String()
    annotator = _String()
    comment = _String()
| 193 | |||
class SPDXChecksum(SPDXObject):
    """A checksum: an algorithm name and its corresponding value."""
    algorithm = _String()
    checksumValue = _String()
| 197 | |||
| 198 | |||
class SPDXRelationship(SPDXObject):
    """A typed relationship between two SPDX elements, given by SPDXID."""
    spdxElementId = _String()
    relatedSpdxElement = _String()
    relationshipType = _String()
    comment = _String()
    annotations = _ObjectList(SPDXAnnotation)
| 205 | |||
| 206 | |||
class SPDXExternalReference(SPDXObject):
    """An external reference (category, type, locator) for a package."""
    referenceCategory = _String()
    referenceType = _String()
    referenceLocator = _String()
| 211 | |||
| 212 | |||
class SPDXPackageVerificationCode(SPDXObject):
    """A package verification code plus the files excluded from it."""
    packageVerificationCodeValue = _String()
    packageVerificationCodeExcludedFiles = _StringList()
| 216 | |||
| 217 | |||
class SPDXPackage(SPDXObject):
    """An SPDX package: identity, licensing, checksum and file membership.

    Fields whose value is mandatory in the spec default to "NOASSERTION".
    """
    # Checksum algorithm names considered acceptable for packages.
    # NOTE(review): no enforcement against this list happens in this
    # class — confirm callers validate against it.
    ALLOWED_CHECKSUMS = [
        "SHA1",
        "SHA224",
        "SHA256",
        "SHA384",
        "SHA512",
        "MD2",
        "MD4",
        "MD5",
        "MD6",
    ]

    name = _String()
    SPDXID = _String()
    versionInfo = _String()
    downloadLocation = _String(default="NOASSERTION")
    supplier = _String(default="NOASSERTION")
    homepage = _String()
    licenseConcluded = _String(default="NOASSERTION")
    licenseDeclared = _String(default="NOASSERTION")
    summary = _String()
    description = _String()
    sourceInfo = _String()
    copyrightText = _String(default="NOASSERTION")
    licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
    externalRefs = _ObjectList(SPDXExternalReference)
    packageVerificationCode = _Object(SPDXPackageVerificationCode)
    hasFiles = _StringList()
    packageFileName = _String()
    annotations = _ObjectList(SPDXAnnotation)
    checksums = _ObjectList(SPDXChecksum)
| 250 | |||
| 251 | |||
class SPDXFile(SPDXObject):
    """A single file entry with its checksums and per-file license info."""
    SPDXID = _String()
    fileName = _String()
    licenseConcluded = _String(default="NOASSERTION")
    copyrightText = _String(default="NOASSERTION")
    licenseInfoInFiles = _StringList(default=["NOASSERTION"])
    checksums = _ObjectList(SPDXChecksum)
    fileTypes = _StringList()
| 260 | |||
| 261 | |||
class SPDXCreationInfo(SPDXObject):
    """Metadata about who or what created the document, and when."""
    created = _String()
    licenseListVersion = _String()
    comment = _String()
    creators = _StringList()
| 267 | |||
| 268 | |||
class SPDXExternalDocumentRef(SPDXObject):
    """A reference to another SPDX document, including its checksum."""
    externalDocumentId = _String()
    spdxDocument = _String()
    checksum = _Object(SPDXChecksum)
| 273 | |||
| 274 | |||
class SPDXExtractedLicensingInfo(SPDXObject):
    """License text extracted from files, under a custom license id."""
    name = _String()
    comment = _String()
    licenseId = _String()
    extractedText = _String()
| 280 | |||
| 281 | |||
class SPDXDocument(SPDXObject):
    """The top-level SPDX document, with (de)serialization helpers."""
    spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
    dataLicense = _String(default="CC0-1.0")
    SPDXID = _String(default="SPDXRef-DOCUMENT")
    name = _String()
    documentNamespace = _String()
    creationInfo = _Object(SPDXCreationInfo)
    packages = _ObjectList(SPDXPackage)
    files = _ObjectList(SPDXFile)
    relationships = _ObjectList(SPDXRelationship)
    externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
    hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)

    def __init__(self, **d):
        super().__init__(**d)

    def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
        """Write the document as JSON to file object f.

        Returns the hex SHA-1 digest of exactly the bytes written, so
        callers can record a checksum of the serialized document.
        """
        class Encoder(json.JSONEncoder):
            def default(self, o):
                # Nested SPDX objects serialize via their backing dict.
                if isinstance(o, SPDXObject):
                    return o.serializer()

                return super().default(o)

        encoder = Encoder(
            sort_keys=sort_keys,
            indent=indent,
            separators=separators,
        )
        digest = hashlib.sha1()
        # Stream the encoding so the hash covers exactly what is written.
        for fragment in encoder.iterencode(self):
            data = fragment.encode("utf-8")
            f.write(data)
            digest.update(data)

        return digest.hexdigest()

    @classmethod
    def from_json(cls, f):
        """Build a document from the JSON in file object f."""
        return cls(**json.load(f))

    def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
        """Append a relationship between two elements.

        _from and _to may be SPDX objects (their SPDXID is used) or raw
        SPDXID strings.
        """
        from_spdxid = _from.SPDXID if isinstance(_from, SPDXObject) else _from
        to_spdxid = _to.SPDXID if isinstance(_to, SPDXObject) else _to

        rel = SPDXRelationship(
            spdxElementId=from_spdxid,
            relatedSpdxElement=to_spdxid,
            relationshipType=relationship,
        )

        if comment is not None:
            rel.comment = comment

        if annotation is not None:
            rel.annotations.append(annotation)

        self.relationships.append(rel)

    def find_by_spdxid(self, spdxid):
        """Return the package or file with the given SPDXID, or None."""
        return next(
            (o for o in itertools.chain(self.packages, self.files)
             if o.SPDXID == spdxid),
            None,
        )

    def find_external_document_ref(self, namespace):
        """Return the external ref whose spdxDocument is namespace, or None."""
        return next(
            (r for r in self.externalDocumentRefs if r.spdxDocument == namespace),
            None,
        )
| 357 | return None | ||
diff --git a/meta-xilinx-core/lib/oe/sstatesig.py b/meta-xilinx-core/lib/oe/sstatesig.py new file mode 100644 index 00000000..d818fce8 --- /dev/null +++ b/meta-xilinx-core/lib/oe/sstatesig.py | |||
| @@ -0,0 +1,691 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
import netrc
import os

import bb.runqueue
import bb.siggen
import oe
| 10 | |||
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
    """Decide whether a task dependency should influence task signatures.

    Return True if we should keep the dependency, False to drop it.
    Dropping a dependency means a change in it no longer invalidates this
    recipe's task hashes.
    """
    # Classification helpers, all keyed off recipe name or inherit list.
    def isNative(x):
        return x.endswith("-native")
    def isCross(x):
        return "-cross-" in x
    def isNativeSDK(x):
        return x.startswith("nativesdk-")
    def isKernel(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
    def isPackageGroup(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return "/packagegroup.bbclass" in inherits
    def isAllArch(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return "/allarch.bbclass" in inherits
    def isImage(mc, fn):
        return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])

    depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
    mc, _ = bb.runqueue.split_mc(fn)

    # We can skip the rm_work task signature to avoid running the task
    # when we remove some tasks from the dependency chain
    # i.e INHERIT:remove = "create-spdx" will trigger the do_rm_work
    if task == "do_rm_work":
        return False

    # (Almost) always include our own inter-task dependencies (unless it comes
    # from a mcdepends). The exception is the special
    # do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
    if recipename == depname and depmc == mc:
        if task == "do_kernel_configme" and deptaskname == "do_unpack_and_patch":
            return False
        return True

    # Exclude well defined recipe->dependency
    if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
        return False

    # Check for special wildcard
    if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
        return False

    # Don't change native/cross/nativesdk recipe dependencies any further
    if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
        return True

    # Only target packages beyond here

    # allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
    if isPackageGroup(mc, fn) and isAllArch(mc, fn) and not isNative(depname):
        return False

    # Exclude well defined machine specific configurations which don't change ABI
    if depname in siggen.abisaferecipes and not isImage(mc, fn):
        return False

    # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
    # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum
    # is machine specific.
    # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
    # and we recommend a kernel-module, we exclude the dependency.
    if dataCaches and isKernel(depmc, depmcfn) and not isKernel(mc, fn):
        for pkg in dataCaches[mc].runrecs[fn]:
            if " ".join(dataCaches[mc].runrecs[fn][pkg]).find("kernel-module-") != -1:
                return False

    # Default to keep dependencies
    return True
| 82 | |||
def sstate_lockedsigs(d):
    """Parse the locked-signature configuration from the datastore.

    Reads SIGGEN_LOCKEDSIGS_TYPES for the list of type suffixes, then each
    SIGGEN_LOCKEDSIGS_<type> variable, whose whitespace-separated entries
    have the form "<pn>:<task>:<hash>".

    Returns a dict mapping pn -> task -> [hash, source variable name].
    """
    sigs = {}
    for t in (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split():
        varname = "SIGGEN_LOCKEDSIGS_%s" % t
        for entry in (d.getVar(varname) or "").split():
            # Split on the first two colons only: hashes never contain ":".
            pn, task, sighash = entry.split(":", 2)
            sigs.setdefault(pn, {})[task] = [sighash, varname]
    return sigs
| 95 | |||
class SignatureGeneratorOEBasicHashMixIn(object):
    """Mixin adding OE-specific signature behaviour to bitbake's generators.

    Provides the OE dependency filter (sstate_rundepfilter) and support
    for locked signatures: task hashes pinned via SIGGEN_LOCKEDSIGS_*
    variables, with mismatch reporting controlled by the
    SIGGEN_LOCKEDSIGS_*_CHECK variables.
    """
    supports_multiconfig_datacaches = True

    def init_rundepcheck(self, data):
        # Configuration consumed by sstate_rundepfilter() and the
        # locked-signature machinery below.
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
        self.lockedsigs = sstate_lockedsigs(data)
        self.lockedhashes = {}
        self.lockedpnmap = {}
        self.lockedhashfn = {}
        self.machine = data.getVar("MACHINE")
        self.mismatch_msgs = []
        self.mismatch_number = 0
        self.lockedsigs_msgs = ""
        self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
                                "").split()
        # Stored as a dict for O(1) membership tests in get_taskhash().
        self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
        self._internal = False
        pass

    def tasks_resolved(self, virtmap, virtpnmap, dataCache):
        # Translate virtual/xxx entries to PN values
        newabisafe = []
        for a in self.abisaferecipes:
            if a in virtpnmap:
                newabisafe.append(virtpnmap[a])
            else:
                newabisafe.append(a)
        self.abisaferecipes = newabisafe
        newsafedeps = []
        for a in self.saferecipedeps:
            a1, a2 = a.split("->")
            if a1 in virtpnmap:
                a1 = virtpnmap[a1]
            if a2 in virtpnmap:
                a2 = virtpnmap[a2]
            newsafedeps.append(a1 + "->" + a2)
        self.saferecipedeps = newsafedeps

    def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
        # Delegate the keep/drop decision to the module-level filter.
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)

    def get_taskdata(self):
        # Prepend our state to the base generator's pickled task data.
        return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()

    def set_taskdata(self, data):
        # Mirror of get_taskdata(): first three entries are ours.
        self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
        super().set_taskdata(data[3:])

    def dump_sigs(self, dataCache, options):
        # Optionally write the locked-sigs include file before dumping.
        if 'lockedsigs' in options:
            sigfile = os.getcwd() + "/locked-sigs.inc"
            bb.plain("Writing locked sigs to %s" % sigfile)
            self.dump_lockedsigs(sigfile)
        return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)


    def get_taskhash(self, tid, deps, dataCaches):
        """Return the task hash, substituting a locked hash when pinned."""
        # Fast path: we already decided for this tid; False means
        # "computed normally, not locked".
        if tid in self.lockedhashes:
            if self.lockedhashes[tid]:
                return self.lockedhashes[tid]
            else:
                return super().get_taskhash(tid, deps, dataCaches)

        h = super().get_taskhash(tid, deps, dataCaches)

        (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)

        recipename = dataCaches[mc].pkg_fn[fn]
        self.lockedpnmap[fn] = recipename
        self.lockedhashfn[fn] = dataCaches[mc].hashfn[fn]

        unlocked = False
        if recipename in self.unlockedrecipes:
            unlocked = True
        else:
            def recipename_from_dep(dep):
                (depmc, _, _, depfn) = bb.runqueue.split_tid_mcfn(dep)
                return dataCaches[depmc].pkg_fn[depfn]

            # If any unlocked recipe is in the direct dependencies then the
            # current recipe should be unlocked as well.
            depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)]
            if any(x in y for y in depnames for x in self.unlockedrecipes):
                self.unlockedrecipes[recipename] = ''
                unlocked = True

        if not unlocked and recipename in self.lockedsigs:
            if task in self.lockedsigs[recipename]:
                h_locked = self.lockedsigs[recipename][task][0]
                var = self.lockedsigs[recipename][task][1]
                self.lockedhashes[tid] = h_locked
                # _internal stops get_cached_unihash() from short-circuiting
                # to the locked hash while we query the real unihash.
                self._internal = True
                unihash = self.get_unihash(tid)
                self._internal = False
                #bb.warn("Using %s %s %s" % (recipename, task, h))

                # Only report a mismatch when the locked hash differs from
                # both the computed hash and the unihash.
                if h != h_locked and h_locked != unihash:
                    self.mismatch_number += 1
                    self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
                                          % (recipename, task, h, h_locked, var))

                return h_locked

        self.lockedhashes[tid] = False
        #bb.warn("%s %s %s" % (recipename, task, h))
        return h

    def get_stampfile_hash(self, tid):
        # Locked hashes also name the stamp files.
        if tid in self.lockedhashes and self.lockedhashes[tid]:
            return self.lockedhashes[tid]
        return super().get_stampfile_hash(tid)

    def get_cached_unihash(self, tid):
        # Locked hashes stand in for unihashes, except during the internal
        # lookup in get_taskhash() (see _internal).
        if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal:
            return self.lockedhashes[tid]
        return super().get_cached_unihash(tid)

    def dump_sigtask(self, fn, task, stampbase, runtime):
        # Don't write siginfo for locked tasks; their hash is pinned.
        tid = fn + ":" + task
        if tid in self.lockedhashes and self.lockedhashes[tid]:
            return
        super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)

    def dump_lockedsigs(self, sigfile, taskfilter=None):
        """Write SIGGEN_LOCKEDSIGS_* assignments for the current run."""
        # Bucket every task id by a type derived from its hashfn (the
        # package-arch field), e.g. "t-core2-64".
        types = {}
        for tid in self.runtaskdeps:
            # Bitbake changed this to a tuple in newer versions
            if isinstance(tid, tuple):
                tid = tid[1]
            if taskfilter:
                if not tid in taskfilter:
                    continue
            fn = bb.runqueue.fn_from_tid(tid)
            t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
            t = 't-' + t.replace('_', '-')
            if t not in types:
                types[t] = []
            types[t].append(tid)

        with open(sigfile, "w") as f:
            l = sorted(types)
            for t in l:
                f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
                types[t].sort()
                # Sort entries by recipe name for stable, reviewable output.
                sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)])
                for tid in sortedtid:
                    (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
                    if tid not in self.taskhash:
                        continue
                    f.write("    " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
                f.write('    "\n')
            f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l)))

    def dump_siglist(self, sigfile, path_prefix_strip=None):
        """Write a sorted "pn:task fn hash" listing of all task hashes."""
        def strip_fn(fn):
            # Optionally strip a path prefix from the recipe filename part.
            nonlocal path_prefix_strip
            if not path_prefix_strip:
                return fn

            fn_exp = fn.split(":")
            if fn_exp[-1].startswith(path_prefix_strip):
                fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):]

            return ":".join(fn_exp)

        with open(sigfile, "w") as f:
            tasks = []
            for taskitem in self.taskhash:
                (fn, task) = taskitem.rsplit(":", 1)
                pn = self.lockedpnmap[fn]
                tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem]))
            for (pn, task, fn, taskhash) in sorted(tasks):
                f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))

    def checkhashes(self, sq_data, missed, found, d):
        """Report locked-signature problems after the sstate availability check.

        Severity (info/warn/error) is chosen by the datastore variables
        SIGGEN_LOCKEDSIGS_TASKSIG_CHECK and
        SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK.
        """
        warn_msgs = []
        error_msgs = []
        sstate_missing_msgs = []
        info_msgs = None

        if self.lockedsigs:
            if len(self.lockedsigs) > 10:
                self.lockedsigs_msgs = "There are %s recipes with locked tasks (%s task(s) have non matching signature)" % (len(self.lockedsigs), self.mismatch_number)
            else:
                self.lockedsigs_msgs = "The following recipes have locked tasks:"
                for pn in self.lockedsigs:
                    self.lockedsigs_msgs += " %s" % (pn)

        # Flag locked hashes that the sstate cache failed to provide.
        for tid in sq_data['hash']:
            if tid not in found:
                for pn in self.lockedsigs:
                    taskname = bb.runqueue.taskname_from_tid(tid)
                    if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()):
                        if taskname == 'do_shared_workdir':
                            continue
                        sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
                                               % (pn, taskname, sq_data['hash'][tid]))

        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
        if checklevel == 'info':
            info_msgs = self.lockedsigs_msgs
        if checklevel == 'warn' or checklevel == 'info':
            warn_msgs += self.mismatch_msgs
        elif checklevel == 'error':
            error_msgs += self.mismatch_msgs

        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
        if checklevel == 'warn':
            warn_msgs += sstate_missing_msgs
        elif checklevel == 'error':
            error_msgs += sstate_missing_msgs

        if info_msgs:
            bb.note(info_msgs)
        if warn_msgs:
            bb.warn("\n".join(warn_msgs))
        if error_msgs:
            bb.fatal("\n".join(error_msgs))
| 315 | |||
class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
    """Basic hash signature generator with OE locked-signature support."""
    name = "OEBasicHash"
| 318 | |||
class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
    """OE signature generator backed by a hash-equivalence server."""
    name = "OEEquivHash"

    def init_rundepcheck(self, data):
        """Configure hash equivalence on top of the base OE settings.

        BB_HASHSERVE and SSTATE_HASHEQUIV_METHOD are mandatory; server
        credentials come from BB_HASHSERVE_USERNAME/PASSWORD or, failing
        that, from the user's ~/.netrc entry for the server.
        """
        super().init_rundepcheck(data)
        self.server = data.getVar('BB_HASHSERVE')
        if not self.server:
            bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
        self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
        if not self.method:
            bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
        self.max_parallel = int(data.getVar('BB_HASHSERVE_MAX_PARALLEL') or 1)
        self.username = data.getVar("BB_HASHSERVE_USERNAME")
        self.password = data.getVar("BB_HASHSERVE_PASSWORD")
        if self.username and self.password:
            return
        # Incomplete credentials: fall back to ~/.netrc, best-effort.
        try:
            auth = netrc.netrc().authenticators(self.server)
        except FileNotFoundError:
            return
        except netrc.NetrcParseError as e:
            bb.warn("Error parsing %s:%s: %s" % (e.filename, str(e.lineno), e.msg))
            return
        if auth is not None:
            self.username, _, self.password = auth
| 343 | |||
# Insert these classes into siggen's namespace so it can see and select them
# when BB_SIGNATURE_HANDLER names one of them.
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
| 347 | |||
| 348 | |||
def find_siginfo(pn, taskname, taskhashlist, d):
    """Find signature data files for comparison purposes.

    Searches the stamps directory first, falling back to the sstate cache
    when too few matches are found. Returns a dict mapping task hash ->
    {'path': filename, 'sstate': bool, 'time': mtime}.
    """

    import glob

    if not taskname:
        # We have to derive pn and taskname
        key = pn
        if key.startswith("mc:"):
            # mc:<mc>:<pn>:<task>
            _, _, pn, taskname = key.split(':', 3)
        else:
            # <pn>:<task>
            pn, taskname = key.split(':', 1)

    hashfiles = {}

    def get_hashval(siginfo):
        # sstate siginfo names carry the hash between the last ':' and '_';
        # stamp sigdata names simply end in ".<hash>".
        if siginfo.endswith('.siginfo'):
            return siginfo.rpartition(':')[2].partition('_')[0]
        else:
            return siginfo.rpartition('.')[2]

    def get_time(fullpath):
        return os.stat(fullpath).st_mtime

    # First search in stamps dir
    localdata = d.createCopy()
    localdata.setVar('MULTIMACH_TARGET_SYS', '*')
    localdata.setVar('PN', pn)
    localdata.setVar('PV', '*')
    localdata.setVar('PR', '*')
    localdata.setVar('EXTENDPE', '')
    stamp = localdata.getVar('STAMP')
    if pn.startswith("gcc-source"):
        # gcc-source shared workdir is a special case :(
        stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")

    filespec = '%s.%s.sigdata.*' % (stamp, taskname)
    foundall = False
    bb.debug(1, "Calling glob.glob on {}".format(filespec))
    for fullpath in glob.glob(filespec):
        if taskhashlist:
            for taskhash in taskhashlist:
                if fullpath.endswith('.%s' % taskhash):
                    hashfiles[taskhash] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
                    if len(hashfiles) == len(taskhashlist):
                        foundall = True
                        break
        else:
            hashval = get_hashval(fullpath)
            hashfiles[hashval] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}

    if not taskhashlist or (len(hashfiles) < 2 and not foundall):
        # That didn't work, look in sstate-cache
        hashes = taskhashlist or ['?' * 64]
        localdata = bb.data.createCopy(d)
        for hashval in hashes:
            localdata.setVar('PACKAGE_ARCH', '*')
            localdata.setVar('TARGET_VENDOR', '*')
            localdata.setVar('TARGET_OS', '*')
            localdata.setVar('PN', pn)
            # gcc-source is a special case, same as with local stamps above
            if pn.startswith("gcc-source"):
                localdata.setVar('PN', "gcc")
            localdata.setVar('PV', '*')
            localdata.setVar('PR', '*')
            localdata.setVar('BB_TASKHASH', hashval)
            # NOTE(review): assumes taskname always carries the "do_" prefix.
            localdata.setVar('SSTATE_CURRTASK', taskname[3:])
            swspec = localdata.getVar('SSTATE_SWSPEC')
            if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
                localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
            elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
                localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
            filespec = '%s.siginfo' % localdata.getVar('SSTATE_PKG')

            bb.debug(1, "Calling glob.glob on {}".format(filespec))
            matchedfiles = glob.glob(filespec)
            for fullpath in matchedfiles:
                actual_hashval = get_hashval(fullpath)
                if actual_hashval in hashfiles:
                    continue
                hashfiles[actual_hashval] = {'path':fullpath, 'sstate':True, 'time':get_time(fullpath)}

    return hashfiles
| 437 | |||
# Replace bitbake's stock find_siginfo with the OE-aware one above; the
# version marker lets consumers (presumably signature-comparison tooling)
# detect the dict-based return format -- confirm against bb.siggen users.
bb.siggen.find_siginfo = find_siginfo
bb.siggen.find_siginfo_version = 2
| 440 | |||
| 441 | |||
def sstate_get_manifest_filename(task, d):
    """
    Return the sstate manifest file path for a particular task.
    Also returns the datastore that can be used to query related variables.
    """
    datastore = d.createCopy()
    # A task-specific stamp-extra-info flag (typically the machine) selects
    # the per-machine manifest variant.
    extra_info = d.getVarFlag("do_" + task, 'stamp-extra-info')
    if extra_info:
        datastore.setVar("SSTATE_MANMACH", extra_info)
    manifest = datastore.expand("${SSTATE_MANFILEPREFIX}.%s" % task)
    return (manifest, datastore)
| 452 | |||
def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
    """Locate the sstate manifest for a task, trying each plausible package
    architecture in turn.

    taskdata is the recipe name, taskdata2 the full task target (used to
    detect "virtual:multilib:<variant>:..." entries), taskname the task.
    multilibcache caches per-variant datastores across calls (mutated here).
    Returns (manifest_path, datastore); calls bb.fatal when nothing matches.
    """
    d2 = d
    variant = ''
    curr_variant = ''
    if d.getVar("BBEXTENDCURR") == "multilib":
        curr_variant = d.getVar("BBEXTENDVARIANT")
        if "virtclass-multilib" not in d.getVar("OVERRIDES"):
            curr_variant = "invalid"
    if taskdata2.startswith("virtual:multilib"):
        variant = taskdata2.split(":")[2]
    if curr_variant != variant:
        # Target belongs to a different multilib variant: use (and cache) a
        # datastore configured for that variant.
        if variant not in multilibcache:
            multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
        d2 = multilibcache[variant]

    # Candidate package architectures, ordered most- to least-specific for
    # the recipe type.
    if taskdata.endswith("-native"):
        pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"]
    elif taskdata.startswith("nativesdk-"):
        pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
    elif "-cross-canadian" in taskdata:
        pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
    elif "-cross-" in taskdata:
        pkgarchs = ["${BUILD_ARCH}"]
    elif "-crosssdk" in taskdata:
        pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
    else:
        # Ordinary target recipe: machine arch, then the extra archs
        # (most specific first), then the catch-alls.
        pkgarchs = ['${MACHINE_ARCH}']
        pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
        pkgarchs.append('allarch')
        pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')

    searched_manifests = []

    for pkgarch in pkgarchs:
        manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
        if os.path.exists(manifest):
            return manifest, d2
        searched_manifests.append(manifest)
    bb.fatal("The sstate manifest for task '%s:%s' (multilib variant '%s') could not be found.\nThe pkgarchs considered were: %s.\nBut none of these manifests exists:\n    %s"
            % (taskdata, taskname, variant, d2.expand(", ".join(pkgarchs)),"\n    ".join(searched_manifests)))
    # NOTE(review): bb.fatal raises, so this return is presumably unreachable
    # defensive code -- kept as-is.
    return None, d2
| 494 | |||
def OEOuthashBasic(path, sigfile, task, d):
    """
    Basic output hash function

    Calculates the output hash of a task by hashing all output file metadata,
    and file contents. Every byte fed into the hash is also mirrored to
    sigfile (when given) so the hashed stream can be inspected afterwards.
    """
    import hashlib
    import stat
    import pwd
    import grp
    import re
    import fnmatch

    def update_hash(s):
        s = s.encode('utf-8')
        h.update(s)
        if sigfile:
            sigfile.write(s)

    h = hashlib.sha256()
    prev_dir = os.getcwd()
    # NOTE(review): corebase/tmpdir appear unused in this function -- confirm
    # before removing (hash stability depends on the hashed stream only).
    corebase = d.getVar("COREBASE")
    tmpdir = d.getVar("TMPDIR")
    # Ownership information is only stable when running under pseudo.
    include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
    if "package_write_" in task or task == "package_qa":
        include_owners = False
    include_timestamps = False
    include_root = True
    if task == "package":
        include_timestamps = True
        include_root = False
    source_date_epoch = float(d.getVar("SOURCE_DATE_EPOCH"))
    hash_version = d.getVar('HASHEQUIV_HASH_VERSION')
    extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA")

    # SSTATE_HASHEQUIV_FILEMAP entries are "<task>:<path-glob>:<substitution>";
    # collect substitutions that apply to this task, keyed by glob.
    filemaps = {}
    for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split():
        entry = m.split(":")
        if len(entry) != 3 or entry[0] != task:
            continue
        filemaps.setdefault(entry[1], [])
        filemaps[entry[1]].append(entry[2])

    try:
        os.chdir(path)
        basepath = os.path.normpath(path)

        update_hash("OEOuthashBasic\n")
        if hash_version:
            update_hash(hash_version + "\n")

        if extra_sigdata:
            update_hash(extra_sigdata + "\n")

        # It is only currently useful to get equivalent hashes for things that
        # can be restored from sstate. Since the sstate object is named using
        # SSTATE_PKGSPEC and the task name, those should be included in the
        # output hash calculation.
        update_hash("SSTATE_PKGSPEC=%s\n" % d.getVar('SSTATE_PKGSPEC'))
        update_hash("task=%s\n" % task)

        for root, dirs, files in os.walk('.', topdown=True):
            # Sort directories to ensure consistent ordering when recursing
            dirs.sort()
            files.sort()

            def process(path):
                # Hash one filesystem entry as an "ls -l"-style record: type,
                # permissions, optional owner/group, optional timestamp,
                # device numbers, size, content hash, name, link target.
                s = os.lstat(path)

                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')

                def add_perm(mask, on, off='-'):
                    if mask & s.st_mode:
                        update_hash(on)
                    else:
                        update_hash(off)

                add_perm(stat.S_IRUSR, 'r')
                add_perm(stat.S_IWUSR, 'w')
                if stat.S_ISUID & s.st_mode:
                    add_perm(stat.S_IXUSR, 's', 'S')
                else:
                    add_perm(stat.S_IXUSR, 'x')

                if include_owners:
                    # Group/other permissions are only relevant in pseudo context
                    add_perm(stat.S_IRGRP, 'r')
                    add_perm(stat.S_IWGRP, 'w')
                    if stat.S_ISGID & s.st_mode:
                        add_perm(stat.S_IXGRP, 's', 'S')
                    else:
                        add_perm(stat.S_IXGRP, 'x')

                    add_perm(stat.S_IROTH, 'r')
                    add_perm(stat.S_IWOTH, 'w')
                    if stat.S_ISVTX & s.st_mode:
                        update_hash('t')
                    else:
                        add_perm(stat.S_IXOTH, 'x')

                    try:
                        update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
                        update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
                    except KeyError as e:
                        msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
                            "any user/group on target. This may be due to host contamination." %
                            (e, os.path.abspath(path), s.st_uid, s.st_gid))
                        raise Exception(msg).with_traceback(e.__traceback__)

                if include_timestamps:
                    # Need to clamp to SOURCE_DATE_EPOCH
                    if s.st_mtime > source_date_epoch:
                        update_hash(" %10d" % source_date_epoch)
                    else:
                        update_hash(" %10d" % s.st_mtime)

                update_hash(" ")
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)

                filterfile = False
                for entry in filemaps:
                    if fnmatch.fnmatch(path, entry):
                        filterfile = True

                update_hash(" ")
                if stat.S_ISREG(s.st_mode) and not filterfile:
                    update_hash("%10d" % s.st_size)
                else:
                    # Filtered files get a blanked size field (substitution
                    # below alters the content length).
                    update_hash(" " * 10)

                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    # Hash file contents
                    if filterfile:
                        # Need to ignore paths in crossscripts and postinst-useradd files.
                        # NOTE(review): "d" here shadows the datastore argument;
                        # harmless since the datastore is not referenced after
                        # this point in this scope, but easy to trip over.
                        with open(path, 'rb') as d:
                            chunk = d.read()
                            chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
                            for entry in filemaps:
                                if not fnmatch.fnmatch(path, entry):
                                    continue
                                for r in filemaps[entry]:
                                    if r.startswith("regex-"):
                                        chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
                                    else:
                                        chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
                            fh.update(chunk)
                    else:
                        with open(path, 'rb') as d:
                            for chunk in iter(lambda: d.read(4096), b""):
                                fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    update_hash(" " * len(fh.hexdigest()))

                update_hash(" %s" % path)

                if stat.S_ISLNK(s.st_mode):
                    update_hash(" -> %s" % os.readlink(path))

                update_hash("\n")

            # Process this directory and all its child files
            if include_root or root != ".":
                process(root)
            for f in files:
                if f == 'fixmepath':
                    # 'fixmepath' is deliberately excluded (presumably an
                    # sstate relocation artifact, not real task output).
                    continue
                process(os.path.join(root, f))

            # os.walk does not descend into symlinked directories; hash the
            # symlink entries themselves instead.
            for dir in dirs:
                if os.path.islink(os.path.join(root, dir)):
                    process(os.path.join(root, dir))
    finally:
        os.chdir(prev_dir)

    return h.hexdigest()
| 690 | |||
| 691 | |||
diff --git a/meta-xilinx-core/lib/oe/terminal.py b/meta-xilinx-core/lib/oe/terminal.py new file mode 100644 index 00000000..4412bc14 --- /dev/null +++ b/meta-xilinx-core/lib/oe/terminal.py | |||
| @@ -0,0 +1,332 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
import logging
import os
import shlex

import oe.classutils
from bb.process import Popen, ExecutionError
| 10 | |||
| 11 | logger = logging.getLogger('BitBake.OE.Terminal') | ||
| 12 | |||
| 13 | |||
class UnsupportedTerminal(Exception):
    """Raised when a particular terminal implementation cannot be used in
    the current environment (binary missing, no X display, etc.)."""
| 16 | |||
class NoSupportedTerminals(Exception):
    """Raised when no terminal implementation could be used at all.

    The command templates that were considered are kept in ``terms``.
    """

    def __init__(self, terms):
        super().__init__()
        self.terms = terms
| 20 | |||
| 21 | |||
class Registry(oe.classutils.ClassRegistry):
    """Metaclass registering every Terminal subclass under its lowercased
    class name."""

    command = None

    def __init__(cls, name, bases, attrs):
        super().__init__(name.lower(), bases, attrs)

    @property
    def implemented(cls):
        """True when the class provides a launch command template."""
        return bool(cls.command)
| 31 | |||
| 32 | |||
class Terminal(Popen, metaclass=Registry):
    """Base terminal launcher: fills in the class command template and runs
    it via bb.process.Popen."""

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        from subprocess import STDOUT
        cmd = self.format_command(sh_cmd, title)
        try:
            Popen.__init__(self, cmd, env=env, stderr=STDOUT)
        except OSError as exc:
            import errno
            # A missing terminal binary just means this implementation is
            # unavailable here; anything else is a genuine error.
            if exc.errno != errno.ENOENT:
                raise
            raise UnsupportedTerminal(self.name)

    def format_command(self, sh_cmd, title):
        """Substitute {title}/{command}/{cwd} into the command template and
        return it as an argument list."""
        subst = {'title': title or 'Terminal', 'command': sh_cmd, 'cwd': os.getcwd()}
        if isinstance(self.command, str):
            return shlex.split(self.command.format(**subst))
        return [part.format(**subst) for part in self.command]
| 52 | |||
class XTerminal(Terminal):
    """Base class for X11 terminals, which require a DISPLAY."""
    def __init__(self, sh_cmd, title=None, env=None, d=None):
        Terminal.__init__(self, sh_cmd, title, env, d)
        # NOTE(review): DISPLAY is checked only *after* Terminal.__init__ has
        # already attempted the launch -- confirm whether that is intended.
        if not os.environ.get('DISPLAY'):
            raise UnsupportedTerminal(self.name)
| 58 | |||
class Gnome(XTerminal):
    """gnome-terminal launcher."""
    command = 'gnome-terminal -t "{title}" -- {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Recent versions of gnome-terminal do not support non-UTF8 charsets:
        # https://bugzilla.gnome.org/show_bug.cgi?id=732127; as a workaround,
        # clear the LC_ALL environment variable so it uses the locale.
        # Once fixed on the gnome-terminal project, this should be removed.
        # (os.putenv only affects spawned child processes, not os.environ.)
        if os.getenv('LC_ALL'): os.putenv('LC_ALL','')

        XTerminal.__init__(self, sh_cmd, title, env, d)
| 71 | |||
class Mate(XTerminal):
    """mate-terminal launcher. --disable-factory presumably forces a
    standalone process (not a factory-owned one) so it can be tracked --
    confirm."""
    command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
    priority = 2
| 75 | |||
class Xfce(XTerminal):
    """xfce4-terminal launcher."""
    command = 'xfce4-terminal -T "{title}" -e "{command}"'
    priority = 2
| 79 | |||
class Terminology(XTerminal):
    """terminology (Enlightenment) terminal launcher."""
    command = 'terminology -T="{title}" -e {command}'
    priority = 2
| 83 | |||
class Konsole(XTerminal):
    """KDE konsole launcher; older releases need different flags."""
    command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Check version and fall back to flags the installed konsole supports.
        vernum = check_terminal_version("konsole")
        if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"):
            # Konsole from KDE 3.x
            self.command = 'konsole -T "{title}" -e {command}'
        elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"):
            # Konsole pre 16.08.01 Has nofork
            self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
        XTerminal.__init__(self, sh_cmd, title, env, d)
| 98 | |||
class XTerm(XTerminal):
    """Classic xterm; low-priority X11 fallback."""
    command = 'xterm -T "{title}" -e {command}'
    priority = 1
| 102 | |||
class Rxvt(XTerminal):
    """rxvt; low-priority X11 fallback."""
    command = 'rxvt -T "{title}" -e {command}'
    priority = 1
| 106 | |||
class URxvt(XTerminal):
    """urxvt (rxvt-unicode); low-priority X11 fallback."""
    command = 'urxvt -T "{title}" -e {command}'
    priority = 1
| 110 | |||
class Screen(Terminal):
    """GNU screen: starts a detached session the user attaches to manually."""
    command = 'screen -D -m -t "{title}" -S devshell {command}'

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Per-process session name so concurrent devshells do not collide.
        s_id = "devshell_%i" % os.getpid()
        self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
        Terminal.__init__(self, sh_cmd, title, env, d)
        msg = 'Screen started. Please connect in another terminal with ' \
            '"screen -r %s"' % s_id
        if (d):
            # With a datastore available, tell the UI so it can surface (and
            # optionally run) the attach command.
            bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
                0.5, 10), d)
        else:
            logger.warning(msg)
| 125 | |||
class TmuxRunning(Terminal):
    """Open a new pane in the current running tmux window"""
    name = 'tmux-running'
    command = 'tmux split-window -c "{cwd}" "{command}"'
    priority = 2.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Only usable when tmux is installed, we are already inside a tmux
        # session ($TMUX set), and the pane is big enough to split.
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')

        if not check_tmux_pane_size('tmux'):
            raise UnsupportedTerminal('tmux pane too small or tmux < 1.9 version is being used')

        Terminal.__init__(self, sh_cmd, title, env, d)
| 143 | |||
class TmuxNewWindow(Terminal):
    """Open a new window in the current running tmux session"""
    name = 'tmux-new-window'
    command = 'tmux new-window -c "{cwd}" -n "{title}" "{command}"'
    priority = 2.70

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Needs tmux installed and an existing session ($TMUX set); unlike
        # tmux-running no pane-size check is needed for a whole new window.
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')

        Terminal.__init__(self, sh_cmd, title, env, d)
| 158 | |||
class Tmux(Terminal):
    """Start a new tmux session and window"""
    command = 'tmux new -c "{cwd}" -d -s devshell -n devshell "{command}"'
    priority = 0.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # TODO: consider using a 'devshell' session shared amongst all
        # devshells, if it's already there, add a new window to it.
        window_name = 'devshell-%i' % os.getpid()

        # {0} is substituted now (session/window name); the doubled braces
        # keep {cwd}/{command} literal for Terminal.format_command later.
        self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
        if not check_tmux_version('1.9'):
            # `tmux new-session -c` was added in 1.9;
            # older versions fail with that flag
            self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
        self.command = self.command.format(window_name)
        Terminal.__init__(self, sh_cmd, title, env, d)

        attach_cmd = 'tmux att -t {0}'.format(window_name)
        msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
        if d:
            # With a datastore available, tell the UI so it can surface (and
            # optionally run) the attach command.
            bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
        else:
            logger.warning(msg)
| 186 | |||
class Custom(Terminal):
    """User-defined terminal via the OE_TERMINAL_CUSTOMCMD variable."""
    command = 'false' # This is a placeholder
    priority = 3

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
        if self.command:
            # Ensure the template actually embeds the command to run.
            if not '{command}' in self.command:
                self.command += ' {command}'
            Terminal.__init__(self, sh_cmd, title, env, d)
            logger.warning('Custom terminal was started.')
        else:
            logger.debug('No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
            raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
| 201 | |||
| 202 | |||
def prioritized():
    """Return the registered terminal classes in priority order (delegates
    to oe.classutils.ClassRegistry)."""
    return Registry.prioritized()
| 205 | |||
def get_cmd_list():
    """Return the command templates of all registered terminals that define
    one, in priority order."""
    return [term.command for term in Registry.prioritized() if term.command]
| 213 | |||
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
    """Spawn the first supported terminal, by priority"""
    for terminal in prioritized():
        try:
            spawn(terminal.name, sh_cmd, title, env, d)
            break
        except UnsupportedTerminal:
            # Unusable in this environment: silently try the next candidate.
            pass
        except:
            # Any other failure: warn, then keep trying further candidates.
            # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt.
            bb.warn("Terminal %s is supported but did not start" % (terminal.name))
    # when we've run out of options (for/else: no candidate ever broke out)
    else:
        raise NoSupportedTerminals(get_cmd_list())
| 227 | |||
def spawn(name, sh_cmd, title=None, env=None, d=None):
    """Spawn the specified terminal, by name.

    Blocks until the command run inside the terminal has exited. Raises
    UnsupportedTerminal for an unknown name and ExecutionError when the
    terminal command itself exits non-zero.
    """
    logger.debug('Attempting to spawn terminal "%s"', name)
    try:
        terminal = Registry.registry[name]
    except KeyError:
        raise UnsupportedTerminal(name)

    # We need to know when the command completes but some terminals (at least
    # gnome and tmux) gives us no way to do this. We therefore write the pid
    # to a file using a "phonehome" wrapper script, then monitor the pid
    # until it exits.
    import tempfile
    import time
    pidfile = tempfile.NamedTemporaryFile(delete = False).name
    try:
        sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
        pipe = terminal(sh_cmd, title, env, d)
        output = pipe.communicate()[0]
        if output:
            output = output.decode("utf-8")
        if pipe.returncode != 0:
            raise ExecutionError(sh_cmd, pipe.returncode, output)

        # Poll until the phonehome wrapper has written its pid to the file.
        while os.stat(pidfile).st_size <= 0:
            time.sleep(0.01)
            continue
        with open(pidfile, "r") as f:
            pid = int(f.readline())
    finally:
        os.unlink(pidfile)

    # Wait for the wrapped command to finish: signal 0 is an existence
    # check; os.kill raises OSError once the process is gone.
    while True:
        try:
            os.kill(pid, 0)
            time.sleep(0.1)
        except OSError:
            return
| 266 | |||
def check_tmux_version(desired):
    """Return the installed tmux version string when it is at least
    *desired*, False when it is older, or None when no version could be
    determined."""
    vernum = check_terminal_version("tmux")
    if not vernum:
        return vernum
    if bb.utils.vercmp_string_op(vernum, desired, "<"):
        return False
    return vernum
| 272 | |||
def check_tmux_pane_size(tmux):
    """Return True when the active tmux pane is tall enough to split,
    False for tmux < 1.9, or None when the binary is missing."""
    import subprocess as sub
    # On older tmux versions (<1.9), return false. The reason
    # is that there is no easy way to get the height of the active panel
    # on current window without nested formats (available from version 1.9)
    if not check_tmux_version('1.9'):
        return False
    try:
        p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
            shell=True,stdout=sub.PIPE,stderr=sub.PIPE)
        out, err = p.communicate()
        # NOTE(review): int() raises an uncaught ValueError if tmux prints
        # anything other than a bare number here.
        size = int(out.strip())
    except OSError as exc:
        import errno
        if exc.errno == errno.ENOENT:
            return None
        else:
            raise

    # Splitting halves the pane; require at least 19 rows for each half.
    return size/2 >= 19
| 293 | |||
def check_terminal_version(terminalName):
    """Return the version string reported by the given terminal program.

    Runs "<name> --version" (or "<name> -V" for tmux) through sh with
    LANG=C and parses the output formats this module knows about. Returns
    None when the program is missing or its output is unrecognised.
    """
    import subprocess as sub
    if terminalName.startswith('tmux'):
        cmdversion = '%s -V' % terminalName
    else:
        cmdversion = '%s --version' % terminalName
    child_env = os.environ.copy()
    child_env["LANG"] = "C"
    try:
        proc = sub.Popen(['sh', '-c', cmdversion], stdout=sub.PIPE, stderr=sub.PIPE, env=child_env)
        out, _ = proc.communicate()
    except OSError as exc:
        import errno
        if exc.errno == errno.ENOENT:
            return None
        raise

    vernum = None
    for line in out.decode().rstrip().split('\n'):
        # These three report "<Name> <Words> <version>" with space separation.
        if line.startswith(('Konsole', 'GNOME Terminal', 'MATE Terminal')):
            vernum = line.split(' ')[-1]
        # tmux prints "tmux <version>"; development builds print
        # "tmux next-<version>", which the second branch strips.
        if line.startswith('tmux'):
            vernum = line.split()[-1]
        if line.startswith('tmux next-'):
            vernum = line.split()[-1][5:]
    return vernum
| 324 | |||
def distro_name():
    """Return the lowercased distributor ID from lsb_release, or "unknown".

    Best effort: any failure (lsb_release missing, unexpected output) yields
    "unknown" rather than an error.
    """
    try:
        p = Popen(['lsb_release', '-i'])
        out, err = p.communicate()
        # The piped output arrives as bytes; decode before string parsing
        # (previously this raised TypeError and always fell into the
        # except branch).
        if isinstance(out, bytes):
            out = out.decode('utf-8', errors='replace')
        distro = out.split(':')[1].strip().lower()
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
        # not swallowed by this best-effort lookup.
        distro = "unknown"
    return distro
diff --git a/meta-xilinx-core/lib/oe/types.py b/meta-xilinx-core/lib/oe/types.py new file mode 100644 index 00000000..b929afb1 --- /dev/null +++ b/meta-xilinx-core/lib/oe/types.py | |||
| @@ -0,0 +1,188 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
| 7 | import errno | ||
| 8 | import re | ||
| 9 | import os | ||
| 10 | |||
| 11 | |||
class OEList(list):
    """OpenEmbedded 'list' type

    Behaves like an ordinary list, but is built by splitting a string on a
    separator (whitespace by default) and re-joins itself with that
    separator when converted back with str(). Set the variable type flag to
    'list' to use this type; the 'separator' flag is optional."""

    name = "list"

    def __init__(self, value, separator = None):
        items = [] if value is None else value.split(separator)
        list.__init__(self, items)
        # Joining uses a single space when no separator was given.
        self.separator = " " if separator is None else separator

    def __str__(self):
        return self.separator.join(self)
| 35 | |||
def choice(value, choices):
    """OpenEmbedded 'choice' type

    Validates *value* (case-insensitively) against a space-separated set of
    allowed values and returns it lowercased. Set the variable type flag to
    'choice' and the 'choices' flag to the list of valid values."""
    if not isinstance(value, str):
        raise TypeError("choice accepts a string, not '%s'" % type(value))

    normalized = value.lower()
    allowed = choices.lower()
    if normalized in allowed.split():
        return normalized
    raise ValueError("Invalid choice '%s'. Valid choices: %s" %
                     (normalized, allowed))
| 51 | |||
class NoMatch(object):
    """Stub python regex pattern object which never matches anything.

    Mirrors the re.Pattern API surface, with every method reporting "no
    match" (None). The methods accept any arguments so every call shape a
    real pattern object accepts works here too; the previous hand-written
    signatures omitted ``self`` on sub/subn (so the first positional
    argument bound as the instance) and gave match no string parameter.
    """
    def findall(self, *args, **kwargs):
        return None

    def finditer(self, *args, **kwargs):
        return None

    def match(self, *args, **kwargs):
        return None

    def search(self, *args, **kwargs):
        return None

    def split(self, *args, **kwargs):
        return None

    def sub(self, *args, **kwargs):
        return None

    def subn(self, *args, **kwargs):
        return None

# Shared singleton instance (shadows the class); regex() returns this for
# empty/undefined pattern values.
NoMatch = NoMatch()
| 76 | |||
def regex(value, regexflags=None):
    """OpenEmbedded 'regex' type

    Compiles *value* and returns the pattern object. Set the variable type
    flag to 'regex'; the optional 'regexflags' flag is a space-separated
    list of re module flag names (e.g. FOO[regexflags] += 'ignorecase').
    See the python documentation on the 're' module for valid flags."""
    flagval = 0
    for flag in (regexflags or '').split():
        flag = flag.upper()
        try:
            flagval |= getattr(re, flag)
        except AttributeError:
            raise ValueError("Invalid regex flag '%s'" % flag)

    if not value:
        # An undefined or empty variable matches nothing by default; users
        # who want to match anything can set '.*' explicitly.
        return NoMatch

    try:
        return re.compile(value, flagval)
    except re.error as exc:
        raise ValueError("Invalid regex value '%s': %s" %
                         (value, exc.args[0]))
| 107 | |||
def boolean(value):
    """OpenEmbedded 'boolean' type

    Valid values for true: 'yes', 'y', 'true', 't', '1'
    Valid values for false: 'no', 'n', 'false', 'f', '0', None
    """
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    if not isinstance(value, str):
        raise TypeError("boolean accepts a string, not '%s'" % type(value))

    normalized = value.lower()
    if normalized in ('yes', 'y', 'true', 't', '1'):
        return True
    if normalized in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % normalized)
| 129 | |||
def integer(value, numberbase=10):
    """OpenEmbedded 'integer' type

    Converts the value to an int.  The base defaults to 10 but may be
    overridden via the optional 'numberbase' flag (itself parsed as a
    base-10 integer)."""
    base = int(numberbase)
    return int(value, base)
| 137 | |||
# Capture the builtin before it is shadowed by the type function below.
_float = float
def float(value, fromhex='false'):
    """OpenEmbedded floating point type

    Converts the value to a float.  Set the 'fromhex' flag to a true
    value (same syntax as the 'boolean' type) when the value is a base 16
    (hexadecimal) float literal rather than base 10."""
    converter = _float.fromhex if boolean(fromhex) else _float
    return converter(value)
| 150 | |||
def path(value, relativeto='', normalize='true', mustexist='false'):
    """OpenEmbedded 'path' type: join value onto relativeto, optionally
    normalize the result, and optionally require that the file exists
    (raising ValueError when it does not)."""
    result = os.path.join(relativeto, value)

    if boolean(normalize):
        result = os.path.normpath(result)

    if boolean(mustexist):
        try:
            with open(result, 'r'):
                pass
        except IOError as exc:
            # Only a missing file is an error here; other I/O failures
            # (e.g. permissions) deliberately pass the existence check.
            if exc.errno == errno.ENOENT:
                raise ValueError("{0}: {1}".format(result, os.strerror(errno.ENOENT)))

    return result
| 166 | |||
def is_x86(arch):
    """
    Return True when arch names an x86 family architecture (x86_64 or an
    i*86 variant), False otherwise.
    """
    return bool(arch.startswith('x86_') or re.match('i.*86', arch))
| 175 | |||
def qemu_use_kvm(kvm, target_arch):
    """
    Decide whether qemu may enable KVM: kvm must be set to a true value,
    and either both the build and target architectures are x86, or the
    build architecture equals the target architecture.
    """
    if not (kvm and boolean(kvm)):
        return False
    build_arch = os.uname()[4]
    if is_x86(build_arch) and is_x86(target_arch):
        return True
    return build_arch == target_arch
diff --git a/meta-xilinx-core/lib/oe/useradd.py b/meta-xilinx-core/lib/oe/useradd.py new file mode 100644 index 00000000..54aa86fe --- /dev/null +++ b/meta-xilinx-core/lib/oe/useradd.py | |||
| @@ -0,0 +1,71 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | import argparse | ||
| 7 | import re | ||
| 8 | |||
class myArgumentParser(argparse.ArgumentParser):
    """ArgumentParser variant that routes argparse output through bitbake.

    NOTE(review): these methods reference 'd' (datastore) and 'pkg' without
    defining them; this code originated in useradd.bbclass context where
    both were in scope -- confirm the callers still provide them.
    """
    def _print_message(self, message, file=None):
        # Surface argparse messages as bitbake warnings instead of stdout.
        bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))

    # This should never be called...
    def exit(self, status=0, message=None):
        message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
        # Fixed: must call the error() method below; the bare name 'error'
        # is not defined at module scope and raised NameError instead of
        # reporting the real problem.
        self.error(message)

    def error(self, message):
        # Abort the build; argparse requires error() not to return.
        bb.fatal(message)
| 20 | |||
def split_commands(params):
    """Split a string of semicolon-separated commands into a list, ignoring
    semicolons inside single or double quotes and dropping empty entries."""
    pieces = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
    return [piece for piece in pieces if piece]
| 25 | |||
def split_args(params):
    """Split a single command string into its arguments, keeping quoted
    strings (single or double quotes) intact and dropping empty entries."""
    pieces = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
    return [piece for piece in pieces if piece]
| 30 | |||
def build_useradd_parser():
    """Build an argument parser accepting the options of shadow's
    useradd(8), mirroring its --help output."""
    p = myArgumentParser(prog='useradd')
    p.add_argument("-b", "--base-dir", help="base directory for the home directory of the new account", metavar="BASE_DIR")
    p.add_argument("-c", "--comment", help="GECOS field of the new account", metavar="COMMENT")
    p.add_argument("-d", "--home-dir", help="home directory of the new account", metavar="HOME_DIR")
    p.add_argument("-D", "--defaults", action="store_true", help="print or change default useradd configuration")
    p.add_argument("-e", "--expiredate", help="expiration date of the new account", metavar="EXPIRE_DATE")
    p.add_argument("-f", "--inactive", help="password inactivity period of the new account", metavar="INACTIVE")
    p.add_argument("-g", "--gid", help="name or ID of the primary group of the new account", metavar="GROUP")
    p.add_argument("-G", "--groups", help="list of supplementary groups of the new account", metavar="GROUPS")
    p.add_argument("-k", "--skel", help="use this alternative skeleton directory", metavar="SKEL_DIR")
    p.add_argument("-K", "--key", help="override /etc/login.defs defaults", metavar="KEY=VALUE")
    p.add_argument("-l", "--no-log-init", action="store_true", help="do not add the user to the lastlog and faillog databases")
    # -m/-M share one tri-state dest: None (unset), True, or False.
    p.add_argument("-m", "--create-home", action="store_const", const=True, help="create the user's home directory")
    p.add_argument("-M", "--no-create-home", action="store_const", const=False, dest="create_home", help="do not create the user's home directory")
    p.add_argument("-N", "--no-user-group", action="store_const", const=False, dest="user_group", help="do not create a group with the same name as the user")
    p.add_argument("-o", "--non-unique", action="store_true", help="allow to create users with duplicate (non-unique UID)")
    p.add_argument("-p", "--password", help="encrypted password of the new account", metavar="PASSWORD")
    p.add_argument("-R", "--root", help="directory to chroot into", metavar="CHROOT_DIR")
    p.add_argument("-r", "--system", action="store_true", help="create a system account")
    p.add_argument("-s", "--shell", help="login shell of the new account", metavar="SHELL")
    p.add_argument("-u", "--uid", help="user ID of the new account", metavar="UID")
    p.add_argument("-U", "--user-group", action="store_const", const=True, help="create a group with the same name as the user")
    p.add_argument("LOGIN", help="Login name of the new user")

    return p
| 58 | |||
def build_groupadd_parser():
    """Build an argument parser accepting the options of shadow's
    groupadd(8), mirroring its --help output."""
    p = myArgumentParser(prog='groupadd')
    p.add_argument("-f", "--force", action="store_true", help="exit successfully if the group already exists, and cancel -g if the GID is already used")
    p.add_argument("-g", "--gid", help="use GID for the new group", metavar="GID")
    p.add_argument("-K", "--key", help="override /etc/login.defs defaults", metavar="KEY=VALUE")
    p.add_argument("-o", "--non-unique", action="store_true", help="allow to create groups with duplicate (non-unique) GID")
    p.add_argument("-p", "--password", help="use this encrypted password for the new group", metavar="PASSWORD")
    p.add_argument("-R", "--root", help="directory to chroot into", metavar="CHROOT_DIR")
    p.add_argument("-r", "--system", action="store_true", help="create a system account")
    p.add_argument("GROUP", help="Group name of the new group")

    return p
diff --git a/meta-xilinx-core/lib/oe/utils.py b/meta-xilinx-core/lib/oe/utils.py new file mode 100644 index 00000000..c9c7a470 --- /dev/null +++ b/meta-xilinx-core/lib/oe/utils.py | |||
| @@ -0,0 +1,529 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | |||
import errno
import multiprocessing
import os
import subprocess
import traceback
| 11 | |||
def read_file(filename):
    """Return the stripped contents of filename, or "" when it cannot be
    opened.

    WARNING: can't raise an error on open failure because of the new
    RDEPENDS handling. This is a bit ugly. :M:
    """
    try:
        f = open(filename, "r")
    except IOError:
        return ""
    # 'with' guarantees the handle is closed even if read() raises
    # (the original leaked the handle on a read error) and the previous
    # unreachable trailing 'return None' is gone.
    with f:
        return f.read().strip()
| 22 | |||
def ifelse(condition, iftrue = True, iffalse = False):
    """Return iftrue when condition is truthy, otherwise iffalse."""
    return iftrue if condition else iffalse
| 28 | |||
def conditional(variable, checkvalue, truevalue, falsevalue, d):
    """Return truevalue when the datastore variable equals checkvalue,
    otherwise falsevalue."""
    return truevalue if d.getVar(variable) == checkvalue else falsevalue
| 34 | |||
def vartrue(var, iftrue, iffalse, d):
    """Return iftrue when the datastore variable evaluates as a true
    boolean (per oe.types.boolean), otherwise iffalse."""
    import oe.types
    return iftrue if oe.types.boolean(d.getVar(var)) else iffalse
| 41 | |||
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Numeric comparison: return truevalue when the datastore variable,
    parsed as a float, is <= checkvalue, otherwise falsevalue."""
    return truevalue if float(d.getVar(variable)) <= float(checkvalue) else falsevalue
| 47 | |||
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Version-string comparison via bb.utils.vercmp_string: return
    truevalue when the datastore variable is <= checkvalue."""
    if bb.utils.vercmp_string(d.getVar(variable), checkvalue) <= 0:
        return truevalue
    return falsevalue
| 54 | |||
def both_contain(variable1, variable2, checkvalue, d):
    """Return the checkvalue items (space-joined) when every item appears
    in both variables' space-separated values, otherwise ""."""
    words1 = set(d.getVar(variable1).split())
    words2 = set(d.getVar(variable2).split())
    if isinstance(checkvalue, str):
        wanted = set(checkvalue.split())
    else:
        wanted = set(checkvalue)
    if wanted <= words1 and wanted <= words2:
        return " ".join(wanted)
    return ""
| 68 | |||
def set_intersect(variable1, variable2, d):
    """
    Expand both variables, treat them as space-separated sets of strings,
    and return the intersection as a flattened (space-joined) string.

    For example:
      s1 = "a b c"
      s2 = "b c d"
      s3 = set_intersect(s1, s2)
      => s3 = "b c"
    """
    words1 = set(d.getVar(variable1).split())
    words2 = set(d.getVar(variable2).split())
    return " ".join(words1.intersection(words2))
| 83 | |||
def prune_suffix(var, suffixes, d):
    """Strip each listed suffix from var when present, then strip a
    leading MLPREFIX, and return the result."""
    for candidate in suffixes:
        if candidate and var.endswith(candidate):
            var = var[:-len(candidate)]

    mlprefix = d.getVar("MLPREFIX")
    if mlprefix and var.startswith(mlprefix):
        var = var[len(mlprefix):]

    return var
| 96 | |||
def str_filter(f, str, d):
    """Return the whitespace-separated words of str whose start matches
    the regex f, re-joined with single spaces."""
    import re
    return " ".join(word for word in str.split() if re.match(f, word, 0))
| 100 | |||
def str_filter_out(f, str, d):
    """Return the whitespace-separated words of str whose start does NOT
    match the regex f, re-joined with single spaces."""
    import re
    return " ".join(word for word in str.split() if not re.match(f, word, 0))
| 104 | |||
def build_depends_string(depends, task):
    """Append a taskname to a string of dependencies as used by the [depends] flag"""
    return " ".join("%s:%s" % (dep, task) for dep in depends.split())
| 108 | |||
def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    for cls in classes:
        if bb.data.inherits_class(cls, d):
            return True
    return False
| 112 | |||
def features_backfill(var, d):
    # Backfill new feature items into the variable named by var (e.g.
    # "DISTRO_FEATURES") so that functionality which would otherwise be
    # lost is enabled even for configurations that set the variable before
    # the feature existed.  Items listed in <var>_BACKFILL are appended
    # unless already present or explicitly opted out via
    # <var>_BACKFILL_CONSIDERED.
    current = (d.getVar(var) or "").split()
    backfill = (d.getVar(var + "_BACKFILL") or "").split()
    considered = (d.getVar(var + "_BACKFILL_CONSIDERED") or "").split()

    missing = [item for item in backfill
               if item not in current and item not in considered]

    if missing:
        d.appendVar(var, " " + " ".join(missing))
| 133 | |||
def all_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if *all* given features are set in DISTRO_FEATURES,
    else falsevalue. The features can be given as single string or anything
    that can be turned into a set.

    This is a shorter, more flexible version of
    bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    Without explicit true/false values it can be used directly where
    Python expects a boolean:
       if oe.utils.all_distro_features(d, "foo bar"):
           bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")

    With just a truevalue, it can be used to include files that are meant to be
    used only when requested via DISTRO_FEATURES:
       require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc") }
    """
    return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
| 153 | |||
def any_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
    else falsevalue. The features can be given as single string or anything
    that can be turned into a set.

    This is a shorter, more flexible version of
    bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    Without explicit true/false values it can be used directly where
    Python expects a boolean:
       if not oe.utils.any_distro_features(d, "foo bar"):
           bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")

    With just a truevalue, it can be used to include files that are meant to be
    used only when requested via DISTRO_FEATURES:
       require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc") }

    """
    return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
| 174 | |||
def parallel_make(d, makeinst=False):
    """
    Return the integer value for the number of parallel threads to use when
    building, scraped out of PARALLEL_MAKE (or PARALLEL_MAKEINST when
    makeinst is True). If no parallelization option is found, returns the
    empty string (the docstring previously claimed None, which the code
    never returned).

    e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
    """
    if makeinst:
        pm = (d.getVar('PARALLEL_MAKEINST') or '').split()
    else:
        pm = (d.getVar('PARALLEL_MAKE') or '').split()
    # look for '-j' and throw other options (e.g. '-l') away
    while pm:
        opt = pm.pop(0)
        if opt == '-j':
            # Guard against a trailing bare '-j' with no value, which
            # previously raised IndexError.
            if not pm:
                break
            v = pm.pop(0)
        elif opt.startswith('-j'):
            v = opt[2:].strip()
        else:
            continue

        return int(v)

    return ''
| 200 | |||
def parallel_make_argument(d, fmt, limit=None, makeinst=False):
    """
    Build a make argument string from the PARALLEL_MAKE thread count.

    Expands the single '%d' in fmt with the number of parallel threads
    (capped at limit when given).  Returns an empty string when
    PARALLEL_MAKE carries no parallelization option.

    e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d")
    returns "-n 10".
    """
    threads = parallel_make(d, makeinst)
    if not threads:
        return ''
    if limit:
        threads = min(limit, threads)
    return fmt % threads
| 220 | |||
def packages_filter_out_system(d):
    """
    Return the PACKAGES list minus the "system" packages: PN itself plus
    PN-dbg, PN-dev, PN-doc, PN-locale, PN-staticdev, PN-src and any
    PN-locale-* package (e.g. PN-locale-en-gb).
    """
    pn = d.getVar('PN')
    system = {pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')}
    locale_prefix = pn + "-locale-"
    return [pkg for pkg in d.getVar('PACKAGES').split()
            if pkg not in system and locale_prefix not in pkg]
| 235 | |||
def getstatusoutput(cmd):
    # Compatibility wrapper kept for existing callers; delegates to
    # subprocess.getstatusoutput (runs cmd through the shell, returns
    # an (exitcode, output) tuple).
    return subprocess.getstatusoutput(cmd)
| 238 | |||
| 239 | |||
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods. For
    example, trim_version("1.2.3", 2) will return "1.2".

    Raises TypeError when version is not a string and ValueError when
    num_parts is less than 1.
    """
    # isinstance (instead of an exact type() check) also accepts str
    # subclasses, matching normal Python duck-typing expectations.
    if not isinstance(version, str):
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")

    return ".".join(version.split(".")[:num_parts])
| 253 | |||
def cpu_count(at_least=1, at_most=64):
    """Number of CPUs this process may run on (scheduler affinity),
    first capped at at_most and then raised to at_least."""
    available = len(os.sched_getaffinity(0))
    capped = min(available, at_most)
    return max(capped, at_least)
| 257 | |||
def execute_pre_post_process(d, cmds):
    """Run each function named in cmds (separated by ';' and/or
    whitespace) via bb.build.exec_func; a None cmds is a no-op."""
    if cmds is None:
        return
    for cmd in cmds.replace(";", " ").split():
        bb.note("Executing %s ..." % cmd)
        bb.build.exec_func(cmd, d)
| 267 | |||
def get_bb_number_threads(d):
    """BB_NUMBER_THREADS as an int, falling back to the host CPU count,
    then to 1."""
    threads = d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1
    return int(threads)
| 270 | |||
def multiprocess_launch(target, items, d, extraargs=None):
    # Convenience wrapper: fan 'target' out over 'items' with up to
    # BB_NUMBER_THREADS worker processes; see multiprocess_launch_mp for
    # the argument and error-handling semantics.
    max_process = get_bb_number_threads(d)
    return multiprocess_launch_mp(target, items, max_process, extraargs)
| 274 | |||
# For each item in items, call the function 'target' with item as the first
# argument, extraargs as the other arguments and handle any exceptions in the
# parent thread
def multiprocess_launch_mp(target, items, max_process, extraargs=None):
    """Run target over items in up to max_process worker processes.

    Results (truthy return values only) are collected via a pipe per
    worker and returned as a list; any exception in a worker aborts the
    run with bb.fatal after all launched workers have been drained.
    """

    class ProcessLaunch(multiprocessing.Process):
        # Worker wrapper that ships either (exception, traceback-string)
        # or (None, result) back to the parent over a Pipe.
        def __init__(self, *args, **kwargs):
            multiprocessing.Process.__init__(self, *args, **kwargs)
            self._pconn, self._cconn = multiprocessing.Pipe()
            self._exception = None
            self._result = None

        def run(self):
            try:
                ret = self._target(*self._args, **self._kwargs)
                self._cconn.send((None, ret))
            except Exception as e:
                tb = traceback.format_exc()
                self._cconn.send((e, tb))

        def update(self):
            # Drain the pipe into _exception/_result; the second tuple
            # element is the traceback on error, the result otherwise.
            if self._pconn.poll():
                (e, tb) = self._pconn.recv()
                if e is not None:
                    self._exception = (e, tb)
                else:
                    self._result = tb

        @property
        def exception(self):
            self.update()
            return self._exception

        @property
        def result(self):
            self.update()
            return self._result

    launched = []
    errors = []
    results = []
    items = list(items)
    # Keep launching while work remains (and nothing failed), and keep
    # polling until every launched worker has been reaped.
    while (items and not errors) or launched:
        if not errors and items and len(launched) < max_process:
            args = (items.pop(),)
            if extraargs is not None:
                args = args + extraargs
            p = ProcessLaunch(target=target, args=args)
            p.start()
            launched.append(p)
        for q in launched:
            # Have to manually call update() to avoid deadlocks. The pipe can be full and
            # transfer stalled until we try and read the results object but the subprocess won't exit
            # as it still has data to write (https://bugs.python.org/issue8426)
            q.update()
            # The finished processes are joined when calling is_alive()
            if not q.is_alive():
                if q.exception:
                    errors.append(q.exception)
                if q.result:
                    results.append(q.result)
                # NOTE(review): removing from 'launched' while iterating it
                # skips the next element this pass; the outer while loop
                # re-scans, so workers are still reaped eventually.
                launched.remove(q)
    # Paranoia doesn't hurt
    for p in launched:
        p.join()
    if errors:
        msg = ""
        for (e, tb) in errors:
            if isinstance(e, subprocess.CalledProcessError) and e.output:
                msg = msg + str(e) + "\n"
                msg = msg + "Subprocess output:"
                msg = msg + e.output.decode("utf-8", errors="ignore")
            else:
                msg = msg + str(e) + ": " + str(tb) + "\n"
        bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
    return results
| 351 | |||
def squashspaces(string):
    """Collapse each run of whitespace in string to a single space and
    strip leading/trailing whitespace."""
    import re
    collapsed = re.sub(r"\s+", " ", string)
    return collapsed.strip()
| 355 | |||
def rprovides_map(pkgdata_dir, pkg_dict):
    """Map each RPROVIDES/FILERPROVIDES component to the list of packages
    from pkg_dict providing it, read from the runtime-reverse pkgdata
    files under pkgdata_dir. Packages without a pkgdata file are skipped."""
    rprov_map = {}

    for pkg in pkg_dict:
        pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
        if not os.path.isfile(pkgfile):
            continue
        with open(pkgfile) as f:
            for line in f:
                if not (line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES')):
                    continue
                # List the provided components, excluding version strings,
                # i.e. tokens starting with '('.
                for prov in (x for x in line.split()[1:] if not x.startswith('(')):
                    rprov_map.setdefault(prov, []).append(pkg)

    return rprov_map
| 377 | |||
def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
    """Render pkg_dict as newline-separated text, sorted by package name.

    ret_format selects the columns: 'arch', 'file', 'ver', 'deps'
    (requires pkgdata_dir for RPROVIDES resolution) or, by default, just
    the package names.  Non-empty output is newline-terminated."""
    lines = []
    pkgs = sorted(pkg_dict)

    if ret_format == "arch":
        lines = ["%s %s" % (pkg, pkg_dict[pkg]["arch"]) for pkg in pkgs]
    elif ret_format == "file":
        lines = ["%s %s %s" % (pkg, pkg_dict[pkg]["filename"], pkg_dict[pkg]["arch"]) for pkg in pkgs]
    elif ret_format == "ver":
        lines = ["%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]) for pkg in pkgs]
    elif ret_format == "deps":
        rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
        for pkg in pkgs:
            for dep in pkg_dict[pkg]["deps"]:
                if dep in rprov_map:
                    # A dependency may have several providers in the image.
                    lines.extend("%s|%s * %s [RPROVIDES]" % (pkg, provider, dep)
                                 for provider in rprov_map[dep])
                else:
                    lines.append("%s|%s" % (pkg, dep))
    else:
        lines = pkgs[:]

    text = '\n'.join(lines)
    if text:
        # make sure last line is newline terminated
        text += '\n'
    return text
| 411 | |||
| 412 | |||
# Helper function to get the host compiler version
# Do not assume the compiler is gcc
def get_host_compiler_version(d, taskcontextonly=False):
    """Return (compiler, "major.minor") for the host BUILD_CC compiler.

    When taskcontextonly is set, returns None unless running in a bitbake
    worker (task) context.  Fatal error (bb.fatal) if the compiler cannot
    be run or its version cannot be parsed from the first output line.
    """
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    compiler = d.getVar("BUILD_CC")
    # Get rid of ccache since it is not present when parsing.
    if compiler.startswith('ccache '):
        compiler = compiler[7:]
    try:
        env = os.environ.copy()
        # datastore PATH does not contain session PATH as set by environment-setup-...
        # this breaks the install-buildtools use-case
        # env["PATH"] = d.getVar("PATH")
        output = subprocess.check_output("%s --version" % compiler, \
                    shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))

    match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
    if not match:
        bb.fatal("Can't get compiler version from %s --version output" % compiler)

    version = match.group(1)
    return compiler, version
| 441 | |||
| 442 | |||
def host_gcc_version(d, taskcontextonly=False):
    """Return "-X.Y" when the host BUILD_CC reports version 4.8 or 4.9,
    otherwise "".  When taskcontextonly is set, returns None unless
    running in a bitbake worker (task) context; fatal error if the
    compiler cannot be run or its version cannot be parsed."""
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    compiler = d.getVar("BUILD_CC")
    # Get rid of ccache since it is not present when parsing.
    if compiler.startswith('ccache '):
        compiler = compiler[7:]
    env = os.environ.copy()
    env["PATH"] = d.getVar("PATH")
    try:
        output = subprocess.check_output("%s --version" % compiler,
                                         shell=True, env=env,
                                         stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))

    first_line = output.split('\n')[0]
    match = re.match(r".* (\d+\.\d+)\.\d+.*", first_line)
    if not match:
        bb.fatal("Can't get compiler version from %s --version output" % compiler)

    version = match.group(1)
    return "-%s" % version if version in ("4.8", "4.9") else ""
| 467 | |||
| 468 | |||
def get_multilib_datastore(variant, d):
    """Return a copy of the datastore d configured for the given multilib
    variant; an empty/None variant selects the default (non-multilib)
    configuration."""
    localdata = bb.data.createCopy(d)
    if not variant:
        # Restore the original tune and drop any multilib override/prefix.
        origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
        if origdefault:
            localdata.setVar("DEFAULTTUNE", origdefault)
        remaining = [o for o in localdata.getVar("OVERRIDES", False).split(":")
                     if not o.startswith("virtclass-multilib-")]
        localdata.setVar("OVERRIDES", ":".join(remaining))
        localdata.setVar("MLPREFIX", "")
    else:
        localdata.setVar("OVERRIDES",
                         localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant)
        localdata.setVar("MLPREFIX", variant + "-")
    return localdata
| 484 | |||
def sh_quote(string):
    """Quote string so it can be safely embedded in a shell command line."""
    from shlex import quote
    return quote(string)
| 488 | |||
def directory_size(root, blocksize=4096):
    """
    Total on-disk size of the tree under root, counting each inode only
    once (so hard links are not double-counted) and rounding every file
    and directory size up to a whole number of blocks.
    """
    import math

    seen_inodes = set()

    def block_roundup(size):
        # Round size up to the next multiple of the block size.
        return math.ceil(size / blocksize) * blocksize

    def unique_size(filename):
        # lstat so a symlink is counted as the link itself; an inode is
        # only counted the first time it is seen (hard links).
        st = os.lstat(filename)
        if st.st_ino in seen_inodes:
            return 0
        seen_inodes.add(st.st_ino)
        return st.st_size

    total = 0
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            total += block_roundup(unique_size(os.path.join(dirpath, name)))
        total += block_roundup(unique_size(dirpath))
    return total
| 519 | |||
# Update the mtime of a file, skip if permission/read-only issues
def touch(filename):
    """Update filename's mtime, silently tolerating files we may not
    touch (permission problems or a read-only filesystem); any other
    OSError propagates."""
    try:
        os.utime(filename, None)
    except PermissionError:
        # Not ours to touch; ignore.
        pass
    except OSError as exc:
        # A read-only filesystem is tolerated, anything else is real.
        if exc.errno != errno.EROFS:
            raise
