author     Richard Purdie <richard.purdie@linuxfoundation.org>    2025-11-07 13:31:53 +0000
committer  Richard Purdie <richard.purdie@linuxfoundation.org>    2025-11-07 13:31:53 +0000
commit     8c22ff0d8b70d9b12f0487ef696a7e915b9e3173 (patch)
tree       efdc32587159d0050a69009bdf2330a531727d95 /meta/classes
parent     d412d2747595c1cc4a5e3ca975e3adc31b2f7891 (diff)
download   poky-8c22ff0d8b70d9b12f0487ef696a7e915b9e3173.tar.gz
The poky repository master branch is no longer being updated.

You can either:

a) switch to individual clones of bitbake, openembedded-core, meta-yocto and
   yocto-docs
b) use the new bitbake-setup

You can find information about either approach in our documentation:

https://docs.yoctoproject.org/

Note that "poky" the distro setting is still available in meta-yocto as before
and we continue to use and maintain that. Long live Poky!

Some further information on the background of this change can be found in:

https://lists.openembedded.org/g/openembedded-architecture/message/2179

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
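
[Editor's note: as a minimal sketch of option (a), assuming the standard
upstream repository locations; verify the URLs against the documentation
linked above before use.]

    # Replace a monolithic poky clone with individual clones
    git clone git://git.openembedded.org/bitbake
    git clone git://git.openembedded.org/openembedded-core
    git clone git://git.yoctoproject.org/meta-yocto
    git clone git://git.yoctoproject.org/yocto-docs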
Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/archiver.bbclass | 631
-rw-r--r--  meta/classes/buildhistory.bbclass | 961
-rw-r--r--  meta/classes/buildstats-summary.bbclass | 46
-rw-r--r--  meta/classes/ccache.bbclass | 74
-rw-r--r--  meta/classes/ccmake.bbclass | 103
-rw-r--r--  meta/classes/chrpath.bbclass | 140
-rw-r--r--  meta/classes/copyleft_compliance.bbclass | 70
-rw-r--r--  meta/classes/copyleft_filter.bbclass | 83
-rw-r--r--  meta/classes/create-spdx-2.2.bbclass | 970
-rw-r--r--  meta/classes/create-spdx-3.0.bbclass | 206
-rw-r--r--  meta/classes/create-spdx.bbclass | 8
-rw-r--r--  meta/classes/cve-check.bbclass | 570
-rw-r--r--  meta/classes/devtool-source.bbclass | 192
-rw-r--r--  meta/classes/distrooverrides.bbclass | 38
-rw-r--r--  meta/classes/externalsrc.bbclass | 280
-rw-r--r--  meta/classes/extrausers.bbclass | 77
-rw-r--r--  meta/classes/go-vendor.bbclass | 215
-rw-r--r--  meta/classes/image-buildinfo.bbclass | 81
-rw-r--r--  meta/classes/mcextend.bbclass | 22
-rw-r--r--  meta/classes/metadata_scm.bbclass | 10
-rw-r--r--  meta/classes/multilib.bbclass | 252
-rw-r--r--  meta/classes/multilib_global.bbclass | 233
-rw-r--r--  meta/classes/oelint.bbclass | 90
-rw-r--r--  meta/classes/own-mirrors.bbclass | 22
-rw-r--r--  meta/classes/prexport.bbclass | 65
-rw-r--r--  meta/classes/primport.bbclass | 27
-rw-r--r--  meta/classes/recipe_sanity.bbclass | 155
-rw-r--r--  meta/classes/relative_symlinks.bbclass | 11
-rw-r--r--  meta/classes/relocatable.bbclass | 26
-rw-r--r--  meta/classes/remove-libtool.bbclass | 17
-rw-r--r--  meta/classes/report-error.bbclass | 159
-rw-r--r--  meta/classes/rm_work.bbclass | 197
-rw-r--r--  meta/classes/rm_work_and_downloads.bbclass | 32
-rw-r--r--  meta/classes/sign_ipk.bbclass | 58
-rw-r--r--  meta/classes/sign_package_feed.bbclass | 53
-rw-r--r--  meta/classes/sign_rpm.bbclass | 78
-rw-r--r--  meta/classes/spdx-common.bbclass | 107
-rw-r--r--  meta/classes/terminal.bbclass | 115
-rw-r--r--  meta/classes/toaster.bbclass | 388
-rw-r--r--  meta/classes/toolchain/clang-native.bbclass | 18
-rw-r--r--  meta/classes/toolchain/clang.bbclass | 40
-rw-r--r--  meta/classes/toolchain/gcc-native.bbclass | 15
-rw-r--r--  meta/classes/toolchain/gcc.bbclass | 33
-rw-r--r--  meta/classes/typecheck.bbclass | 18
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 313
-rw-r--r--  meta/classes/useradd.bbclass | 290
-rw-r--r--  meta/classes/useradd_base.bbclass | 171
-rw-r--r--  meta/classes/vex.bbclass | 303
48 files changed, 0 insertions, 8063 deletions
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
deleted file mode 100644
index a95c899a0f..0000000000
--- a/meta/classes/archiver.bbclass
+++ /dev/null
@@ -1,631 +0,0 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

#
# This bbclass is used for creating archives of:
# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
# 3) configured source: ARCHIVER_MODE[src] = "configured"
# 4) source mirror: ARCHIVER_MODE[src] = "mirror"
# 5) The patches between do_unpack and do_patch:
#    ARCHIVER_MODE[diff] = "1"
#    And you can set the ones that you'd like to exclude from the diff:
#    ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
# 6) The environment data, similar to 'bitbake -e recipe':
#    ARCHIVER_MODE[dumpdata] = "1"
# 7) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
# 8) Whether to output the .src.rpm package:
#    ARCHIVER_MODE[srpm] = "1"
# 9) Filter by license: recipes whose license is in
#    COPYLEFT_LICENSE_INCLUDE will be included, and those in
#    COPYLEFT_LICENSE_EXCLUDE will be excluded.
#    COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
#    COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
# 10) The recipe types that will be archived:
#     COPYLEFT_RECIPE_TYPES = 'target'
# 11) The source mirror mode:
#     ARCHIVER_MODE[mirror] = "split" (default): Sources are split into
#     per-recipe directories in a similar way to other archiver modes.
#     Post-processing may be required to produce a single mirror directory.
#     This does however allow inspection of duplicate sources and more
#     intelligent handling.
#     ARCHIVER_MODE[mirror] = "combined": All sources are placed into a single
#     directory suitable for direct use as a mirror. Duplicate sources are
#     ignored.
# 12) Source mirror exclusions:
#     ARCHIVER_MIRROR_EXCLUDE is a list of prefixes to exclude from the mirror.
#     This may be used for sources which you are already publishing yourself
#     (e.g. if the URI starts with 'https://mysite.com/' and your mirror is
#     going to be published to the same site). It may also be used to exclude
#     local files (with the prefix 'file://') if these will be provided as part
#     of an archive of the layers themselves.
#

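# Example (illustrative; not part of the original file): a minimal local.conf
# snippet enabling this class to archive patched source plus diffs, using only
# the options documented above:
#
#   INHERIT += "archiver"
#   ARCHIVER_MODE[src] = "patched"
#   ARCHIVER_MODE[diff] = "1"
#
# For ARCHIVER_MODE[src] = "mirror", BB_GENERATE_MIRROR_TARBALLS = "1" must
# also be set (see do_ar_mirror below), and ARCHIVER_MIRROR_EXCLUDE may be
# used to skip prefixes, e.g.:
#
#   ARCHIVER_MIRROR_EXCLUDE = "https://mysite.com/ file://"
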
# Create archive for all the recipe types
COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian'
inherit copyleft_filter

ARCHIVER_MODE[srpm] ?= "0"
ARCHIVER_MODE[src] ?= "patched"
ARCHIVER_MODE[diff] ?= "0"
ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"
ARCHIVER_MODE[mirror] ?= "split"
ARCHIVER_MODE[compression] ?= "xz"

DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
ARCHIVER_ARCH = "${TARGET_SYS}"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"

# When producing a combined mirror directory, allow duplicates for the case
# where multiple recipes use the same SRC_URI.
ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"

do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"

# This is a convenience for the shell script to use

def include_package(d, pn):

    included, reason = copyleft_should_include(d)
    if not included:
        bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
        return False

    else:
        bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))

    # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
    # so avoid archiving source here.
    if pn.startswith('glibc-locale'):
        return False

    # We just archive gcc-source for all the gcc related recipes
    if d.getVar('BPN') in ['gcc', 'libgcc'] \
            and not pn.startswith('gcc-source'):
        bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
        return False

    return True

python () {
    pn = d.getVar('PN')
    assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
    if pn in assume_provided:
        for p in d.getVar("PROVIDES").split():
            if p != pn:
                pn = p
                break

    if not include_package(d, pn):
        return

    # TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
    if pn.startswith('gcc-source'):
        d.setVar('ARCHIVER_ARCH', "allarch")

    def hasTask(task):
        return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))

    ar_src = d.getVarFlag('ARCHIVER_MODE', 'src')
    ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata')
    ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe')

    if ar_src == "original":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
        # 'patched' and 'configured' invoke do_unpack_and_patch because
        # do_ar_patched resp. do_ar_configured depend on it, but for 'original'
        # we have to add it explicitly.
        if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
            d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_unpack_and_patch' % pn)
    elif ar_src == "patched":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
    elif ar_src == "configured":
        # We can't use "addtask do_ar_configured after do_configure" since it
        # will cause the deptask of do_populate_sysroot to run no matter what
        # archives we need, so we add the depends here.

        # There is a corner case with "gcc-source-${PV}" recipes, they don't have
        # the "do_configure" task, so we need to use "do_preconfigure"
        if hasTask("do_preconfigure"):
            d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
        elif hasTask("do_configure"):
            d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
    elif ar_src == "mirror":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_mirror' % pn)

    elif ar_src:
        bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)

    if ar_dumpdata == "1":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_dumpdata' % pn)

    if ar_recipe == "1":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)

    # Output the SRPM package
    if d.getVarFlag('ARCHIVER_MODE', 'srpm') == "1" and d.getVar('PACKAGES'):
        if "package_rpm" not in d.getVar('PACKAGE_CLASSES'):
            bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")

        # Some recipes do not have any packaging tasks
        if hasTask("do_package_write_rpm"):
            d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
            d.appendVarFlag('do_package_write_rpm', 'dirs', ' ${ARCHIVER_RPMTOPDIR}')
            d.appendVarFlag('do_package_write_rpm', 'sstate-inputdirs', ' ${ARCHIVER_RPMTOPDIR}')
            d.appendVarFlag('do_package_write_rpm', 'sstate-outputdirs', ' ${DEPLOY_DIR_SRC}')
            d.appendVar('PSEUDO_INCLUDE_PATHS', ',${ARCHIVER_TOPDIR}')
            if ar_dumpdata == "1":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
            if ar_recipe == "1":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_recipe' % pn)
            if ar_src == "original":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_original' % pn)
            elif ar_src == "patched":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
            elif ar_src == "configured":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
}

# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
# Files in SRC_URI are copied directly, anything that's a directory
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {

    import shutil, tempfile

    if d.getVarFlag('ARCHIVER_MODE', 'src') != "original":
        return

    ar_outdir = d.getVar('ARCHIVER_OUTDIR')
    bb.note('Archiving the original source...')
    urls = d.getVar("SRC_URI").split()
    # destsuffix (git fetcher) and subdir (everything else) are allowed to be
    # absolute paths (for example, destsuffix=${S}/foobar).
    # That messes with unpacking inside our tmpdir below, because the fetchers
    # will then unpack in that directory and completely ignore the tmpdir.
    # That breaks parallel tasks relying on ${S}, like do_compile.
    #
    # To solve this, we remove these parameters from all URLs.
    # We do this even for relative paths because it makes the content of the
    # archives more useful (no extra paths that are only used during
    # compilation).
    for i, url in enumerate(urls):
        decoded = bb.fetch2.decodeurl(url)
        for param in ('destsuffix', 'subdir'):
            if param in decoded[5]:
                del decoded[5][param]
        encoded = bb.fetch2.encodeurl(decoded)
        urls[i] = encoded

    # Clean up SRC_URI before calling bb.fetch2.Fetch() since SRC_URI is now
    # in the variable "urls", otherwise there might be errors like:
    #   The SRCREV_FORMAT variable must be set when multiple SCMs are used
    ld = bb.data.createCopy(d)
    ld.setVar('SRC_URI', '')
    fetch = bb.fetch2.Fetch(urls, ld)
    tarball_suffix = {}
    for url in fetch.urls:
        local = fetch.localpath(url).rstrip("/")
        if os.path.isfile(local):
            shutil.copy(local, ar_outdir)
        elif os.path.isdir(local):
            tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR'))
            fetch.unpack(tmpdir, (url,))
            # To handle recipes with more than one source, we add the "name"
            # URL parameter as suffix. We treat it as an error when
            # there's more than one URL without a name, or a name gets reused.
            # This is an additional safety net, in practice the name has
            # to be set when using the git fetcher, otherwise SRCREV cannot
            # be set separately for each URL.
            params = bb.fetch2.decodeurl(url)[5]
            type = bb.fetch2.decodeurl(url)[0]
            location = bb.fetch2.decodeurl(url)[2]
            name = params.get('name', '')
            if type.lower() == 'file':
                name_tmp = location.rstrip("*").rstrip("/")
                name = os.path.basename(name_tmp)
            else:
                if name in tarball_suffix:
                    if not name:
                        bb.fatal("Cannot determine archive names for original source because 'name' URL parameter is unset in more than one URL. Add it to at least one of these: %s %s" % (tarball_suffix[name], url))
                    else:
                        bb.fatal("Cannot determine archive names for original source because 'name=' URL parameter '%s' is used twice. Make it unique in: %s %s" % (name, tarball_suffix[name], url))
            tarball_suffix[name] = url
            create_tarball(d, tmpdir + '/.', name, ar_outdir)

    # Emit patch series files for 'original'
    bb.note('Writing patch series files...')
    for patch in src_patches(d):
        _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
        patchdir = parm.get('patchdir')
        if patchdir:
            series = os.path.join(ar_outdir, 'series.subdir.%s' % patchdir.replace('/', '_'))
        else:
            series = os.path.join(ar_outdir, 'series')

        with open(series, 'a') as s:
            s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
}

python do_ar_patched() {

    if d.getVarFlag('ARCHIVER_MODE', 'src') != 'patched':
        return

    # Get the ARCHIVER_OUTDIR before we reset the WORKDIR
    ar_outdir = d.getVar('ARCHIVER_OUTDIR')
    if not is_work_shared(d):
        ar_workdir = d.getVar('ARCHIVER_WORKDIR')
        d.setVar('WORKDIR', ar_workdir)
    bb.note('Archiving the patched source...')
    create_tarball(d, d.getVar('S'), 'patched', ar_outdir)
}

python do_ar_configured() {
    import shutil

    # Forcibly expand the sysroot paths as we're about to change WORKDIR
    d.setVar('STAGING_DIR_HOST', d.getVar('STAGING_DIR_HOST'))
    d.setVar('STAGING_DIR_TARGET', d.getVar('STAGING_DIR_TARGET'))
    d.setVar('RECIPE_SYSROOT', d.getVar('RECIPE_SYSROOT'))
    d.setVar('RECIPE_SYSROOT_NATIVE', d.getVar('RECIPE_SYSROOT_NATIVE'))

    ar_outdir = d.getVar('ARCHIVER_OUTDIR')
    if d.getVarFlag('ARCHIVER_MODE', 'src') == 'configured':
        bb.note('Archiving the configured source...')
        pn = d.getVar('PN')
        # "gcc-source-${PV}" recipes don't have a "do_configure"
        # task, so we need to run "do_preconfigure" instead
        if pn.startswith("gcc-source-"):
            d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
            bb.build.exec_func('do_preconfigure', d)

        # libtool-native's do_configure removes
        # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
        # do_configure; we archive the already configured ${S} instead.
        # The kernel class functions require ${S} to be in work-shared, so we
        # don't unpack, patch or configure again, we just archive the already
        # configured ${S}.
        elif not (pn == 'libtool-native' or is_work_shared(d)):
            def runTask(task):
                prefuncs = d.getVarFlag(task, 'prefuncs') or ''
                for func in prefuncs.split():
                    if func != "sysroot_cleansstate":
                        bb.build.exec_func(func, d)
                bb.build.exec_func(task, d)
                postfuncs = d.getVarFlag(task, 'postfuncs') or ''
                for func in postfuncs.split():
                    if func != 'do_qa_configure':
                        bb.build.exec_func(func, d)

            # Change the WORKDIR to make do_configure run in another dir.
            d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))

            preceeds = bb.build.preceedtask('do_configure', False, d)
            for task in preceeds:
                if task != 'do_patch' and task != 'do_prepare_recipe_sysroot':
                    runTask(task)
            runTask('do_configure')

        srcdir = d.getVar('S')
        builddir = d.getVar('B')
        if srcdir != builddir:
            if os.path.exists(builddir):
                oe.path.copytree(builddir, os.path.join(srcdir, \
                    'build.%s.ar_configured' % d.getVar('PF')))
        create_tarball(d, srcdir, 'configured', ar_outdir)
}

python do_ar_mirror() {
    import subprocess

    src_uri = (d.getVar('SRC_URI') or '').split()
    if len(src_uri) == 0:
        return

    dl_dir = d.getVar('DL_DIR')
    mirror_exclusions = (d.getVar('ARCHIVER_MIRROR_EXCLUDE') or '').split()
    mirror_mode = d.getVarFlag('ARCHIVER_MODE', 'mirror')
    have_mirror_tarballs = oe.types.boolean(d.getVar('BB_GENERATE_MIRROR_TARBALLS'))

    if mirror_mode == 'combined':
        destdir = d.getVar('ARCHIVER_COMBINED_MIRRORDIR')
    elif mirror_mode == 'split':
        destdir = d.getVar('ARCHIVER_OUTDIR')
    else:
        bb.fatal('Invalid ARCHIVER_MODE[mirror]: %s' % (mirror_mode))

    if not have_mirror_tarballs:
        bb.fatal('Using `ARCHIVER_MODE[src] = "mirror"` depends on setting `BB_GENERATE_MIRROR_TARBALLS = "1"`')

    def is_excluded(url):
        for prefix in mirror_exclusions:
            if url.startswith(prefix):
                return True
        return False

    bb.note('Archiving the source as a mirror...')

    bb.utils.mkdirhier(destdir)

    fetcher = bb.fetch2.Fetch(src_uri, d)

    for ud in fetcher.expanded_urldata():
        if is_excluded(ud.url):
            bb.note('Skipping excluded url: %s' % (ud.url))
            continue

        bb.note('Archiving url: %s' % (ud.url))
        ud.setup_localpath(d)
        localpath = None

        # Check for mirror tarballs first. We will archive the first mirror
        # tarball that we find as it's assumed that we just need one.
        for mirror_fname in ud.mirrortarballs:
            mirror_path = os.path.join(dl_dir, mirror_fname)
            if os.path.exists(mirror_path):
                bb.note('Found mirror tarball: %s' % (mirror_path))
                localpath = mirror_path
                break

        if len(ud.mirrortarballs) and not localpath:
            bb.warn('Mirror tarballs are listed for a source but none are present. ' \
                    'Falling back to original download.\n' \
                    'SRC_URI = %s' % (ud.url))

        # Check original download
        if not localpath:
            bb.note('Using original download: %s' % (ud.localpath))
            localpath = ud.localpath

        if not localpath or not os.path.exists(localpath):
            bb.fatal('Original download is missing for a source.\n' \
                     'SRC_URI = %s' % (ud.url))

        # We now have an appropriate localpath
        bb.note('Copying source mirror')
        cmd = 'cp -fpPRH %s %s' % (localpath, destdir)
        subprocess.check_call(cmd, shell=True)
}

def create_tarball(d, srcdir, suffix, ar_outdir):
    """
    create the tarball from srcdir
    """
    import subprocess

    # Make sure we are only creating a single tarball for gcc sources
    if d.getVar('SRC_URI') == "":
        return

    # For the kernel archive, srcdir may just be a link to the
    # work-shared location. Use os.path.realpath to make sure
    # that we archive the actual directory and not just the link.
    srcdir = os.path.realpath(srcdir)

    compression_method = d.getVarFlag('ARCHIVER_MODE', 'compression')
    if compression_method == "xz":
        compression_cmd = "xz %s" % d.getVar('XZ_DEFAULTS')
    # To keep compatibility with ARCHIVER_MODE[compression]
    elif compression_method == "gz":
        compression_cmd = "gzip"
    elif compression_method == "bz2":
        compression_cmd = "bzip2"
    else:
        bb.fatal("Unsupported compression_method: %s" % compression_method)

    bb.utils.mkdirhier(ar_outdir)
    if suffix:
        filename = '%s-%s.tar.%s' % (d.getVar('PF'), suffix, compression_method)
    else:
        filename = '%s.tar.%s' % (d.getVar('PF'), compression_method)
    tarname = os.path.join(ar_outdir, filename)

    bb.note('Creating %s' % tarname)
    dirname = os.path.dirname(srcdir)
    basename = os.path.basename(srcdir)
    exclude = "--exclude=temp --exclude=patches --exclude='.pc'"
    tar_cmd = "tar %s -cf - %s | %s > %s" % (exclude, basename, compression_cmd, tarname)
    subprocess.check_call(tar_cmd, cwd=dirname, shell=True)

# creating .diff.gz between source.orig and source
def create_diff_gz(d, src_orig, src, ar_outdir):

    import subprocess

    if not os.path.isdir(src) or not os.path.isdir(src_orig):
        return

    # diff's --exclude can't exclude files by path, so we copy
    # the patched source and remove the files that we'd like to
    # exclude.
    src_patched = src + '.patched'
    oe.path.copyhardlinktree(src, src_patched)
    for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude').split():
        bb.utils.remove(os.path.join(src_orig, i), recurse=True)
        bb.utils.remove(os.path.join(src_patched, i), recurse=True)

    dirname = os.path.dirname(src)
    basename = os.path.basename(src)
    bb.utils.mkdirhier(ar_outdir)
    cwd = os.getcwd()
    try:
        os.chdir(dirname)
        out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF'))
        diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
        subprocess.check_call(diff_cmd, shell=True)
        bb.utils.remove(src_patched, recurse=True)
    finally:
        os.chdir(cwd)

def is_work_shared(d):
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), 'work-shared')
    sourcedir = os.path.realpath(d.getVar('S'))
    return sourcedir.startswith(sharedworkdir)

# Run do_unpack and do_patch
python do_unpack_and_patch() {
    if d.getVarFlag('ARCHIVER_MODE', 'src') not in \
            [ 'patched', 'configured'] and \
            d.getVarFlag('ARCHIVER_MODE', 'diff') != '1':
        return
    ar_outdir = d.getVar('ARCHIVER_OUTDIR')
    ar_workdir = d.getVar('ARCHIVER_WORKDIR')
    ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
    pn = d.getVar('PN')

    # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
    if not is_work_shared(d):
        # Change the WORKDIR to make do_unpack do_patch run in another dir.
        d.setVar('WORKDIR', ar_workdir)
        # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
        d.setVar('STAGING_DIR_NATIVE', ar_sysroot_native)

        # Changing 'WORKDIR' also changes 'B', so create the 'B' directory as
        # following tasks may require it to exist (e.g. some recipes'
        # do_patch requires 'B').
        bb.utils.mkdirhier(d.getVar('B'))

        bb.build.exec_func('do_unpack', d)

    # Save the original source for creating the patches
    if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
        src = d.getVar('S').rstrip('/')
        src_orig = '%s.orig' % src
        oe.path.copytree(src, src_orig)

    if bb.data.inherits_class('dos2unix', d):
        bb.build.exec_func('do_convert_crlf_to_lf', d)

    # Make sure gcc and kernel sources are patched only once
    if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
        bb.build.exec_func('do_patch', d)

    # Create the patches
    if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
        bb.note('Creating diff gz...')
        create_diff_gz(d, src_orig, src, ar_outdir)
        bb.utils.remove(src_orig, recurse=True)
}

# BBINCLUDED is special (excluded from basehash signature
# calculation). Using it in a task signature can cause "basehash
# changed" errors.
#
# Depending on BBINCLUDED also causes do_ar_recipe to run again
# for unrelated changes, like adding or removing buildhistory.bbclass.
#
# For these reasons we ignore the dependency completely. The versioning
# of the output file ensures that we create it each time the recipe
# gets rebuilt, at least as long as a PR server is used. We also rely
# on that mechanism to catch changes in the file content, because the
# file content is not part of the task signature either.
do_ar_recipe[vardepsexclude] += "BBINCLUDED"
python do_ar_recipe () {
    """
    archive the recipe, including .bb and .inc.
    """
    import re
    import shutil

    require_re = re.compile( r"require\s+(.+)" )
    include_re = re.compile( r"include\s+(.+)" )
    bbfile = d.getVar('FILE')
    outdir = os.path.join(d.getVar('WORKDIR'), \
            '%s-recipe' % d.getVar('PF'))
    bb.utils.mkdirhier(outdir)
    shutil.copy(bbfile, outdir)

    pn = d.getVar('PN')
    bbappend_files = d.getVar('BBINCLUDED').split()
    # If recipe name is aa, we need to match files like aa.bbappend and aa_1.1.bbappend
    # Files like aa1.bbappend or aa1_1.1.bbappend must be excluded.
    bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" % re.escape(pn))
    bbappend_re1 = re.compile( r".*/%s\.bbappend$" % re.escape(pn))
    for file in bbappend_files:
        if bbappend_re.match(file) or bbappend_re1.match(file):
            shutil.copy(file, outdir)

    dirname = os.path.dirname(bbfile)
    bbpath = '%s:%s' % (dirname, d.getVar('BBPATH'))
    with open(bbfile, 'r') as f:
        for line in f.readlines():
            incfile = None
            if require_re.match(line):
                incfile = require_re.match(line).group(1)
            elif include_re.match(line):
                incfile = include_re.match(line).group(1)
            if incfile:
                incfile = d.expand(incfile)
            if incfile:
                incfile = bb.utils.which(bbpath, incfile)
            if incfile:
                shutil.copy(incfile, outdir)

    create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR'))
    bb.utils.remove(outdir, recurse=True)
}

python do_dumpdata () {
    """
    dump environment data to ${PF}-showdata.dump
    """

    dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR'), \
        '%s-showdata.dump' % d.getVar('PF'))
    bb.note('Dumping metadata into %s' % dumpfile)
    with open(dumpfile, "w") as f:
        # emit variables and shell functions
        bb.data.emit_env(f, d, True)
        # emit the metadata which isn't valid shell
        for e in d.keys():
            if d.getVarFlag(e, "python", False):
                f.write("\npython %s () {\n%s}\n" % (e, d.getVar(e, False)))
}

SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
    bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
}
python do_deploy_archives_setscene () {
    sstate_setscene(d)
}
do_deploy_archives[dirs] = "${ARCHIVER_TOPDIR}"
do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
addtask do_deploy_archives_setscene

addtask do_ar_original after do_unpack
addtask do_unpack_and_patch after do_patch do_preconfigure
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
addtask do_ar_mirror after do_fetch
addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives
do_build[recrdeptask] += "do_deploy_archives"
do_rootfs[recrdeptask] += "do_deploy_archives"
do_populate_sdk[recrdeptask] += "do_deploy_archives"

python () {
    # Add tasks in the correct order, specifically for linux-yocto to avoid race condition.
    # sstatesig.py:sstate_rundepfilter has special support that excludes this dependency
    # so that do_kernel_configme does not need to run again when do_unpack_and_patch
    # gets added or removed (by adding or removing archiver.bbclass).
    if bb.data.inherits_class('kernel-yocto', d):
        bb.build.addtask('do_kernel_configme', 'do_configure', 'do_unpack_and_patch', d)
}
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
deleted file mode 100644
index 4a380c10c6..0000000000
--- a/meta/classes/buildhistory.bbclass
+++ /dev/null
@@ -1,961 +0,0 @@
#
# Records history of build output in order to detect regressions
#
# Based in part on testlab.bbclass and packagehistory.bbclass
#
# Copyright (C) 2011-2016 Intel Corporation
# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
#
# SPDX-License-Identifier: MIT
#

IMAGE_CLASSES += "image-artifact-names"

BUILDHISTORY_FEATURES ?= "image package sdk"
BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"

BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}${SDK_EXT}/${IMAGE_BASENAME}"
BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
BUILDHISTORY_SDK_FILES ?= "conf/local.conf conf/bblayers.conf conf/auto.conf conf/locked-sigs.inc conf/devtool.conf"
BUILDHISTORY_COMMIT ?= "1"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
BUILDHISTORY_TAG ?= "build"
BUILDHISTORY_PATH_PREFIX_STRIP ?= ""

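# Example (illustrative; not part of the original file): buildhistory is
# typically enabled from local.conf using only the variables defined above,
# e.g.:
#
#   INHERIT += "buildhistory"
#   BUILDHISTORY_COMMIT = "1"
#   BUILDHISTORY_FEATURES = "image package sdk"
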
# We want to avoid influencing the signatures of the tasks, so use vardepsexclude
do_populate_sysroot[postfuncs] += "buildhistory_emit_sysroot"
do_populate_sysroot_setscene[postfuncs] += "buildhistory_emit_sysroot"
do_populate_sysroot[vardepsexclude] += "buildhistory_emit_sysroot"

do_package[postfuncs] += "buildhistory_list_pkg_files"
do_package_setscene[postfuncs] += "buildhistory_list_pkg_files"
do_package[vardepsexclude] += "buildhistory_list_pkg_files"

do_packagedata[postfuncs] += "buildhistory_emit_pkghistory"
do_packagedata_setscene[postfuncs] += "buildhistory_emit_pkghistory"
do_packagedata[vardepsexclude] += "buildhistory_emit_pkghistory"

# Similarly for our function that gets the output signatures
SSTATEPOSTUNPACKFUNCS:append = " buildhistory_emit_outputsigs"
sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"

# All items except those listed here will be removed from a recipe's
# build history directory by buildhistory_emit_pkghistory(). This is
# necessary because some of these items (package directories, files that
# we no longer emit) might be obsolete.
#
# The files listed here are either written by tasks that aren't do_package (e.g.
# latest_srcrev from do_fetch), so do_package must not remove them, or they're
# used to read values in do_package before always being overwritten, e.g. latest,
# for version backwards checks.
BUILDHISTORY_PRESERVE = "latest latest_srcrev sysroot"

PATCH_GIT_USER_EMAIL ?= "buildhistory@oe"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"

#
# Write out the contents of the sysroot
#
buildhistory_emit_sysroot() {
    mkdir --parents ${BUILDHISTORY_DIR_PACKAGE}
    case ${CLASSOVERRIDE} in
    class-native|class-cross|class-crosssdk)
        BASE=${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}
        ;;
    *)
        BASE=${SYSROOT_DESTDIR}
        ;;
    esac
    buildhistory_list_files_no_owners $BASE ${BUILDHISTORY_DIR_PACKAGE}/sysroot
}

#
# Write out metadata about this package for comparison when writing future packages
#
python buildhistory_emit_pkghistory() {
    import re
    import json
    import shlex
    import errno
    import shutil

    if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
        return 0

    pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')

    class RecipeInfo:
        def __init__(self, name):
            self.name = name
            self.pe = "0"
            self.pv = "0"
            self.pr = "r0"
            self.depends = ""
            self.packages = ""
            self.srcrev = ""
            self.layer = ""
            self.license = ""
            self.config = ""
            self.src_uri = ""


    class PackageInfo:
        def __init__(self, name):
            self.name = name
            self.pe = "0"
            self.pv = "0"
            self.pr = "r0"
            # pkg/pkge/pkgv/pkgr should be empty because we want to be able to default them
            self.pkg = ""
            self.pkge = ""
            self.pkgv = ""
            self.pkgr = ""
            self.size = 0
            self.depends = ""
            self.rprovides = ""
            self.rdepends = ""
            self.rrecommends = ""
            self.rsuggests = ""
            self.rreplaces = ""
            self.rconflicts = ""
            self.files = ""
            self.filelist = ""
            # Variables that need to be written to their own separate file
            self.filevars = dict.fromkeys(['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'])

    # Should check PACKAGES here to see if anything was removed

    def readPackageInfo(pkg, histfile):
        pkginfo = PackageInfo(pkg)
        with open(histfile, "r") as f:
            for line in f:
                lns = line.split('=', 1)
                name = lns[0].strip()
                value = lns[1].strip(" \t\r\n").strip('"')
                if name == "PE":
                    pkginfo.pe = value
                elif name == "PV":
                    pkginfo.pv = value
                elif name == "PR":
                    pkginfo.pr = value
                elif name == "PKG":
                    pkginfo.pkg = value
                elif name == "PKGE":
                    pkginfo.pkge = value
                elif name == "PKGV":
                    pkginfo.pkgv = value
                elif name == "PKGR":
                    pkginfo.pkgr = value
                elif name == "RPROVIDES":
                    pkginfo.rprovides = value
                elif name == "RDEPENDS":
                    pkginfo.rdepends = value
                elif name == "RRECOMMENDS":
                    pkginfo.rrecommends = value
                elif name == "RSUGGESTS":
                    pkginfo.rsuggests = value
                elif name == "RREPLACES":
                    pkginfo.rreplaces = value
                elif name == "RCONFLICTS":
                    pkginfo.rconflicts = value
                elif name == "PKGSIZE":
                    pkginfo.size = int(value)
                elif name == "FILES":
                    pkginfo.files = value
                elif name == "FILELIST":
                    pkginfo.filelist = value
        # Apply defaults
        if not pkginfo.pkg:
            pkginfo.pkg = pkginfo.name
        if not pkginfo.pkge:
            pkginfo.pkge = pkginfo.pe
        if not pkginfo.pkgv:
            pkginfo.pkgv = pkginfo.pv
        if not pkginfo.pkgr:
            pkginfo.pkgr = pkginfo.pr
        return pkginfo

    def getlastpkgversion(pkg):
        try:
            histfile = os.path.join(pkghistdir, pkg, "latest")
            return readPackageInfo(pkg, histfile)
        except EnvironmentError:
            return None

    def sortpkglist(string):
        pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+[^)]+\))?', string, 0)
        pkglist = [p.group(0) for p in pkgiter]
        pkglist.sort()
        return ' '.join(pkglist)

    def sortlist(string):
        items = string.split(' ')
        items.sort()
        return ' '.join(items)

    pn = d.getVar('PN')
    pe = d.getVar('PE') or "0"
    pv = d.getVar('PV')
    pr = d.getVar('PR')
    layer = bb.utils.get_file_layer(d.getVar('FILE'), d)
    license = d.getVar('LICENSE')

    pkgdata_dir = d.getVar('PKGDATA_DIR')
    packages = ""
    try:
        with open(os.path.join(pkgdata_dir, pn)) as f:
            for line in f.readlines():
                if line.startswith('PACKAGES: '):
                    packages = oe.utils.squashspaces(line.split(': ', 1)[1])
                    break
    except IOError as e:
        if e.errno == errno.ENOENT:
            # Probably a -cross recipe, just ignore
            return 0
        else:
            raise

    packagelist = packages.split()
    preserve = d.getVar('BUILDHISTORY_PRESERVE').split()
    if not os.path.exists(pkghistdir):
        bb.utils.mkdirhier(pkghistdir)
    else:
        # Remove files for packages that no longer exist
        for item in os.listdir(pkghistdir):
            if item not in preserve:
                if item not in packagelist:
                    itempath = os.path.join(pkghistdir, item)
                    if os.path.isdir(itempath):
                        for subfile in os.listdir(itempath):
                            os.unlink(os.path.join(itempath, subfile))
                        os.rmdir(itempath)
                    else:
                        os.unlink(itempath)

    rcpinfo = RecipeInfo(pn)
    rcpinfo.pe = pe
    rcpinfo.pv = pv
    rcpinfo.pr = pr
    rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS') or ""))
    rcpinfo.packages = packages
    rcpinfo.layer = layer
    rcpinfo.license = license
    rcpinfo.config = sortlist(oe.utils.squashspaces(d.getVar('PACKAGECONFIG') or ""))
    rcpinfo.src_uri = oe.utils.squashspaces(d.getVar('SRC_URI') or "")
    write_recipehistory(rcpinfo, d)

    bb.build.exec_func("read_subpackage_metadata", d)

    for pkg in packagelist:
        localdata = d.createCopy()
        localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)

        pkge = localdata.getVar("PKGE") or '0'
        pkgv = localdata.getVar("PKGV")
        pkgr = localdata.getVar("PKGR")
        #
        # Find out what the last version was
        # Make sure the version did not decrease
        #
        lastversion = getlastpkgversion(pkg)
        if lastversion:
            last_pkge = lastversion.pkge
            last_pkgv = lastversion.pkgv
            last_pkgr = lastversion.pkgr
            r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
            if r < 0:
                msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
                oe.qa.handle_error("version-going-backwards", msg, d)

        pkginfo = PackageInfo(pkg)
        # Apparently the version can be different on a per-package basis (see Python)
        pkginfo.pe = localdata.getVar("PE") or '0'
        pkginfo.pv = localdata.getVar("PV")
        pkginfo.pr = localdata.getVar("PR")
        pkginfo.pkg = localdata.getVar("PKG")
        pkginfo.pkge = pkge
        pkginfo.pkgv = pkgv
        pkginfo.pkgr = pkgr
        pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(localdata.getVar("RPROVIDES") or ""))
        pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RDEPENDS") or ""))
        pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RRECOMMENDS") or ""))
        pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(localdata.getVar("RSUGGESTS") or ""))
        pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(localdata.getVar("RREPLACES") or ""))
        pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(localdata.getVar("RCONFLICTS") or ""))
        pkginfo.files = oe.utils.squashspaces(localdata.getVar("FILES") or "")
        for filevar in pkginfo.filevars:
            pkginfo.filevars[filevar] = localdata.getVar(filevar) or ""

        # Gather information about packaged files
        val = localdata.getVar('FILES_INFO') or ''
        dictval = json.loads(val)
        filelist = list(dictval.keys())
        filelist.sort()
        pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist])

        pkginfo.size = int(localdata.getVar('PKGSIZE') or '0')

        write_pkghistory(pkginfo, d)

    oe.qa.exit_if_errors(d)
}

python buildhistory_emit_outputsigs() {
    if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
        return

    import hashlib

    taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task', 'output')
    bb.utils.mkdirhier(taskoutdir)
    currenttask = d.getVar('BB_CURRENTTASK')
    pn = d.getVar('PN')
    taskfile = os.path.join(taskoutdir, '%s.%s' % (pn, currenttask))

    cwd = os.getcwd()
    filesigs = {}
    for root, _, files in os.walk(cwd):
        for fname in files:
            if fname == 'fixmepath':
                continue
            fullpath = os.path.join(root, fname)
            try:
                if os.path.islink(fullpath):
                    sha256 = hashlib.sha256(os.readlink(fullpath).encode('utf-8')).hexdigest()
                elif os.path.isfile(fullpath):
                    sha256 = bb.utils.sha256_file(fullpath)
                else:
                    continue
            except OSError:
                bb.warn('buildhistory: unable to read %s to get output signature' % fullpath)
                continue
            filesigs[os.path.relpath(fullpath, cwd)] = sha256
    with open(taskfile, 'w') as f:
        for fpath, fsig in sorted(filesigs.items(), key=lambda item: item[0]):
            f.write('%s %s\n' % (fpath, fsig))
}


def write_recipehistory(rcpinfo, d):
    bb.debug(2, "Writing recipe history")

    pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')

    infofile = os.path.join(pkghistdir, "latest")
    with open(infofile, "w") as f:
        if rcpinfo.pe != "0":
            f.write(u"PE = %s\n" % rcpinfo.pe)
        f.write(u"PV = %s\n" % rcpinfo.pv)
        f.write(u"PR = %s\n" % rcpinfo.pr)
        f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
        f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
        f.write(u"LAYER = %s\n" % rcpinfo.layer)
        f.write(u"LICENSE = %s\n" % rcpinfo.license)
        f.write(u"CONFIG = %s\n" % rcpinfo.config)
        f.write(u"SRC_URI = %s\n" % rcpinfo.src_uri)

    write_latest_srcrev(d, pkghistdir)

def write_pkghistory(pkginfo, d):
    bb.debug(2, "Writing package history for package %s" % pkginfo.name)

    pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')

    pkgpath = os.path.join(pkghistdir, pkginfo.name)
    if not os.path.exists(pkgpath):
        bb.utils.mkdirhier(pkgpath)

    infofile = os.path.join(pkgpath, "latest")
    with open(infofile, "w") as f:
        if pkginfo.pe != "0":
            f.write(u"PE = %s\n" % pkginfo.pe)
        f.write(u"PV = %s\n" % pkginfo.pv)
        f.write(u"PR = %s\n" % pkginfo.pr)

        if pkginfo.pkg != pkginfo.name:
            f.write(u"PKG = %s\n" % pkginfo.pkg)
        if pkginfo.pkge != pkginfo.pe:
            f.write(u"PKGE = %s\n" % pkginfo.pkge)
        if pkginfo.pkgv != pkginfo.pv:
            f.write(u"PKGV = %s\n" % pkginfo.pkgv)
        if pkginfo.pkgr != pkginfo.pr:
            f.write(u"PKGR = %s\n" % pkginfo.pkgr)
        f.write(u"RPROVIDES = %s\n" % pkginfo.rprovides)
        f.write(u"RDEPENDS = %s\n" % pkginfo.rdepends)
        f.write(u"RRECOMMENDS = %s\n" % pkginfo.rrecommends)
        if pkginfo.rsuggests:
            f.write(u"RSUGGESTS = %s\n" % pkginfo.rsuggests)
        if pkginfo.rreplaces:
            f.write(u"RREPLACES = %s\n" % pkginfo.rreplaces)
        if pkginfo.rconflicts:
            f.write(u"RCONFLICTS = %s\n" % pkginfo.rconflicts)
        f.write(u"PKGSIZE = %d\n" % pkginfo.size)
        f.write(u"FILES = %s\n" % pkginfo.files)
        f.write(u"FILELIST = %s\n" % pkginfo.filelist)

    for filevar in pkginfo.filevars:
        filevarpath = os.path.join(pkgpath, "latest.%s" % filevar)
        val = pkginfo.filevars[filevar]
        if val:
            with open(filevarpath, "w") as f:
                f.write(val)
        else:
            if os.path.exists(filevarpath):
                os.unlink(filevarpath)

#
# rootfs_type can be: image, sdk_target, sdk_host
#
def buildhistory_list_installed(d, rootfs_type="image"):
    from oe.rootfs import image_list_installed_packages
    from oe.sdk import sdk_list_installed_packages
    from oe.utils import format_pkg_list

    process_list = [('file', 'bh_installed_pkgs_%s.txt' % os.getpid()),\
                    ('deps', 'bh_installed_pkgs_deps_%s.txt' % os.getpid())]

    if rootfs_type == "image":
        pkgs = image_list_installed_packages(d)
    else:
        pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")

    if rootfs_type == "sdk_host":
        pkgdata_dir = d.getVar('PKGDATA_DIR_SDK')
    else:
        pkgdata_dir = d.getVar('PKGDATA_DIR')

    for output_type, output_file in process_list:
        output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)

        with open(output_file_full, 'w') as output:
            output.write(format_pkg_list(pkgs, output_type, pkgdata_dir))

python buildhistory_list_installed_image() {
    buildhistory_list_installed(d)
}

python buildhistory_list_installed_sdk_target() {
    buildhistory_list_installed(d, "sdk_target")
}

python buildhistory_list_installed_sdk_host() {
    buildhistory_list_installed(d, "sdk_host")
}

buildhistory_get_installed() {
    mkdir -p $1

    # Get list of installed packages
    pkgcache="$1/installed-packages.tmp"
    cat ${WORKDIR}/bh_installed_pkgs_${PID}.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs_${PID}.txt

    cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt

    if [ -s $pkgcache ] ; then
        cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
    else
        printf "" > $1/installed-packages.txt
    fi

    # Produce dependency graph
    # First, quote each name to handle characters that cause issues for dot
    sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt > $1/depends.tmp &&
        rm ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt
    # Remove lines with rpmlib(...) and config(...) dependencies, change the
    # delimiter from pipe to "->", set the style for recommend lines and
    # turn versioned dependencies into edge labels.
    sed -i -e '/rpmlib(/d' \
           -e '/config(/d' \
           -e 's:|: -> :' \
           -e 's:"\[REC\]":[style=dotted]:' \
           -e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
           -e 's:"\([*]\+\)" "\([^"]*\)":[label="\2"]:' \
           -e 's:"\[RPROVIDES\]":[style=dashed]:' \
           $1/depends.tmp
    # Add header, sorted and de-duped contents and footer and then delete the temp file
    printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
    cat $1/depends.tmp | sort -u >> $1/depends.dot
    echo "}" >> $1/depends.dot
    rm $1/depends.tmp

    # Set correct pkgdatadir
    pkgdatadir=${PKGDATA_DIR}
    if [ "$2" = "sdk" ] && [ "$3" = "host" ] ; then
        pkgdatadir="${PKGDATA_DIR_SDK}"
    fi

    # Produce installed package sizes list
    oe-pkgdata-util -p $pkgdatadir read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
    cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
    rm $1/installed-package-sizes.tmp

    # Produce package info: runtime_name, buildtime_name, recipe, version, size
    oe-pkgdata-util -p $pkgdatadir read-value "PACKAGE,PN,PV,PKGSIZE" -n -f $pkgcache > $1/installed-package-info.tmp
    cat $1/installed-package-info.tmp | sort -n -r -k 5 > $1/installed-package-info.txt
    rm $1/installed-package-info.tmp

    # We're now done with the cache, delete it
    rm $pkgcache

    if [ "$2" != "sdk" ] ; then
        # Produce some cut-down graphs (for readability)
        grep -v kernel-image $1/depends.dot | grep -v kernel-3 | grep -v kernel-4 > $1/depends-nokernel.dot
        grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot
        grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot
        grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot
    fi

    # Add complementary package information
    if [ -e ${WORKDIR}/complementary_pkgs.txt ]; then
        cp ${WORKDIR}/complementary_pkgs.txt $1
    fi
}

buildhistory_get_image_installed() {
    # Anything requiring the use of the packaging system should be done in here
    # in case the packaging files are going to be removed for this image

    if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
        return
    fi

    buildhistory_get_installed ${BUILDHISTORY_DIR_IMAGE}
}

buildhistory_get_sdk_installed() {
    # Anything requiring the use of the packaging system should be done in here
    # in case the packaging files are going to be removed for this SDK

    if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
        return
    fi

    buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk $1
}

buildhistory_get_sdk_installed_host() {
    buildhistory_get_sdk_installed host
}

buildhistory_get_sdk_installed_target() {
    buildhistory_get_sdk_installed target
}

buildhistory_list_files() {
    # List the files in the specified directory, but exclude date/time etc.
    # This is somewhat messy, but handles cases where the size is not printed for device files under pseudo
    ( cd $1
    find_cmd='find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n"'
    if [ "$3" = "fakeroot" ] ; then
        eval ${FAKEROOTENV} ${FAKEROOTCMD} $find_cmd
    else
        eval $find_cmd
    fi | sort -k5 | sed 's/ * -> $//' > $2 )
}

buildhistory_list_files_no_owners() {
    # List the files in the specified directory, but exclude date/time etc.
    # Also don't output the ownership data, but instead output just - - so
    # that the same parsing code as for _list_files works.
    # This is somewhat messy, but handles cases where the size is not printed for device files under pseudo
    ( cd $1
    find_cmd='find . ! -path . -printf "%M - - %10s %p -> %l\n"'
    if [ "$3" = "fakeroot" ] ; then
        eval ${FAKEROOTENV} ${FAKEROOTCMD} "$find_cmd"
    else
        eval "$find_cmd"
    fi | sort -k5 | sed 's/ * -> $//' > $2 )
}

buildhistory_list_pkg_files() {
    if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'package', '1', '0', d)}" = "0" ] ; then
        return
    fi

    # Create individual files-in-package for each recipe's package
    pkgdirlist=$(find ${PKGDEST}/* -maxdepth 0 -type d)
    for pkgdir in $pkgdirlist; do
        pkgname=$(basename $pkgdir)
        outfolder="${BUILDHISTORY_DIR_PACKAGE}/$pkgname"
        outfile="$outfolder/files-in-package.txt"
        mkdir -p $outfolder
        buildhistory_list_files $pkgdir $outfile fakeroot
    done
}

buildhistory_get_imageinfo() {
    if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
        return
    fi

    mkdir -p ${BUILDHISTORY_DIR_IMAGE}
    buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt

    # Collect files requested in BUILDHISTORY_IMAGE_FILES
    rm -rf ${BUILDHISTORY_DIR_IMAGE}/image-files
    for f in ${BUILDHISTORY_IMAGE_FILES}; do
        if [ -f ${IMAGE_ROOTFS}/$f ] ; then
            mkdir -p ${BUILDHISTORY_DIR_IMAGE}/image-files/`dirname $f`
            cp ${IMAGE_ROOTFS}/$f ${BUILDHISTORY_DIR_IMAGE}/image-files/$f
        fi
    done

    # Record some machine-readable meta-information about the image
    printf "" > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
    cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
${@buildhistory_get_imagevars(d)}
END
    imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
    echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt

    # Add some configuration information
    echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id.txt

    cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id.txt <<END
${@buildhistory_get_build_id(d)}
END
}

buildhistory_get_sdkinfo() {
    if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
        return
    fi

    buildhistory_list_files ${SDK_OUTPUT} ${BUILDHISTORY_DIR_SDK}/files-in-sdk.txt

    # Collect files requested in BUILDHISTORY_SDK_FILES
    rm -rf ${BUILDHISTORY_DIR_SDK}/sdk-files
    for f in ${BUILDHISTORY_SDK_FILES}; do
        if [ -f ${SDK_OUTPUT}/${SDKPATH}/$f ] ; then
            mkdir -p ${BUILDHISTORY_DIR_SDK}/sdk-files/`dirname $f`
            cp ${SDK_OUTPUT}/${SDKPATH}/$f ${BUILDHISTORY_DIR_SDK}/sdk-files/$f
        fi
    done

    # Record some machine-readable meta-information about the SDK
    printf "" > ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
    cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <<END
${@buildhistory_get_sdkvars(d)}
END
    sdksize=`du -ks ${SDK_OUTPUT} | awk '{ print $1 }'`
    echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
}

python buildhistory_get_extra_sdkinfo() {
    import operator
    from oe.sdk import get_extra_sdkinfo

    sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
    extra_info = get_extra_sdkinfo(sstate_dir)

    if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext' and \
            "sdk" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
        with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-package-sizes.txt'), 'w') as f:
            filesizes_sorted = sorted(extra_info['filesizes'].items(), key=operator.itemgetter(1, 0), reverse=True)
            for fn, size in filesizes_sorted:
                f.write('%10d KiB %s\n' % (size, fn))
        with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-task-sizes.txt'), 'w') as f:
            tasksizes_sorted = sorted(extra_info['tasksizes'].items(), key=operator.itemgetter(1, 0), reverse=True)
            for task, size in tasksizes_sorted:
                f.write('%10d KiB %s\n' % (size, task))
}

# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
# unneeded packages but before the removal of packaging files
ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_list_installed_image"
ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_get_image_installed"
ROOTFS_POSTUNINSTALL_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_image| buildhistory_get_image_installed"
ROOTFS_POSTUNINSTALL_COMMAND[vardepsexclude] += "buildhistory_list_installed_image buildhistory_get_image_installed"

IMAGE_POSTPROCESS_COMMAND += "buildhistory_get_imageinfo"
IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo"
IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"

# We want these to be the last run so that we get called after complementary package installation
POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_list_installed_sdk_target"
POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_get_sdk_installed_target"
POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target| buildhistory_get_sdk_installed_target"
POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"

POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_list_installed_sdk_host"
POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_get_sdk_installed_host"
POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host| buildhistory_get_sdk_installed_host"
POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"

SDK_POSTPROCESS_COMMAND:append = " buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"

python buildhistory_write_sigs() {
    if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
        return

    # Create sigs file
    if hasattr(bb.parse.siggen, 'dump_siglist'):
        taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task')
        bb.utils.mkdirhier(taskoutdir)
        bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'), d.getVar("BUILDHISTORY_PATH_PREFIX_STRIP"))
}

def buildhistory_get_build_id(d):
    if d.getVar('BB_WORKERCONTEXT') != '1':
        return ""
    localdata = bb.data.createCopy(d)
    statuslines = []
    for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
        g = globals()
        if func not in g:
            bb.warn("Build configuration function '%s' does not exist" % func)
        else:
            flines = g[func](localdata)
            if flines:
                statuslines.extend(flines)

    statusheader = d.getVar('BUILDCFG_HEADER')
    return '\n%s\n%s\n' % (statusheader, '\n'.join(statuslines))

def buildhistory_get_metadata_revs(d):
    # We want an easily machine-readable format here
    revisions = oe.buildcfg.get_layer_revisions(d)
    metadata_revs = ["%-17s = %s:%s%s" % (r[1], r[2], r[3], r[4]) for r in revisions]
    return '\n'.join(metadata_revs)

def outputvars(vars, listvars, d):
    vars = vars.split()
    listvars = listvars.split()
    ret = ""
    for var in vars:
        value = d.getVar(var) or ""
        if var in listvars:
            # Squash out spaces
            value = oe.utils.squashspaces(value)
        ret += "%s = %s\n" % (var, value)
    return ret.rstrip('\n')

def buildhistory_get_imagevars(d):
    if d.getVar('BB_WORKERCONTEXT') != '1':
        return ""
    imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
    listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
    return outputvars(imagevars, listvars, d)

def buildhistory_get_sdkvars(d):
    if d.getVar('BB_WORKERCONTEXT') != '1':
        return ""
    sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES TOOLCHAIN_HOST_TASK TOOLCHAIN_TARGET_TASK BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
    if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
        # Extensible SDK uses some additional variables
        sdkvars += " ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
    listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE"
    return outputvars(sdkvars, listvars, d)


def buildhistory_get_cmdline(d):
    argv = d.getVar('BB_CMDLINE', False)
    if argv:
        if argv[0].endswith('bin/bitbake'):
            bincmd = 'bitbake'
        else:
            bincmd = argv[0]
        return '%s %s' % (bincmd, ' '.join(argv[1:]))
    return ''


768buildhistory_single_commit() {
769 if [ "$3" = "" ] ; then
770 commitopts="${BUILDHISTORY_DIR}/ --allow-empty"
771 shortlogprefix="No changes: "
772 else
773 commitopts=""
774 shortlogprefix=""
775 fi
776 if [ "${BUILDHISTORY_BUILD_FAILURES}" = "0" ] ; then
777 result="succeeded"
778 else
779 result="failed"
780 fi
781 case ${BUILDHISTORY_BUILD_INTERRUPTED} in
782 1)
783 result="$result (interrupted)"
784 ;;
785 2)
786 result="$result (force interrupted)"
787 ;;
788 esac
789 commitmsgfile=`mktemp`
790 cat > $commitmsgfile << END
791${shortlogprefix}Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $2
792
793cmd: $1
794
795result: $result
796
797metadata revisions:
798END
799 cat ${BUILDHISTORY_DIR}/metadata-revs >> $commitmsgfile
800 git commit $commitopts -F $commitmsgfile --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
801 rm $commitmsgfile
802}
803
804buildhistory_commit() {
805 if [ ! -d ${BUILDHISTORY_DIR} ] ; then
806        # The code above that creates this dir never executed, so there can't be anything to commit
807 return
808 fi
809
810 # Create a machine-readable list of metadata revisions for each layer
811 cat > ${BUILDHISTORY_DIR}/metadata-revs <<END
812${@buildhistory_get_metadata_revs(d)}
813END
814
815 ( cd ${BUILDHISTORY_DIR}/
816 # Initialise the repo if necessary
817 if [ ! -e .git ] ; then
818 git init -q
819 else
820 git tag -f --no-sign ${BUILDHISTORY_TAG}-minus-3 ${BUILDHISTORY_TAG}-minus-2 > /dev/null 2>&1 || true
821 git tag -f --no-sign ${BUILDHISTORY_TAG}-minus-2 ${BUILDHISTORY_TAG}-minus-1 > /dev/null 2>&1 || true
822 git tag -f --no-sign ${BUILDHISTORY_TAG}-minus-1 > /dev/null 2>&1 || true
823 fi
824
825 check_git_config
826
827 # Check if there are new/changed files to commit (other than metadata-revs)
828 repostatus=`git status --porcelain | grep -v " metadata-revs$"`
829 HOSTNAME=`hostname 2>/dev/null || echo unknown`
830 CMDLINE="${@buildhistory_get_cmdline(d)}"
831 if [ "$repostatus" != "" ] ; then
832 git add -A .
833 # Porcelain output looks like "?? packages/foo/bar"
834 # Ensure we commit metadata-revs with the first commit
835 buildhistory_single_commit "$CMDLINE" "$HOSTNAME" dummy
836 else
837 buildhistory_single_commit "$CMDLINE" "$HOSTNAME"
838 fi
839 if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
840 git push -q ${BUILDHISTORY_PUSH_REPO}
841 fi) || true
842}
843
844python buildhistory_eventhandler() {
845 if (e.data.getVar('BUILDHISTORY_FEATURES') or "").strip():
846 if isinstance(e, bb.event.BuildCompleted):
847 if e.data.getVar("BUILDHISTORY_COMMIT") == "1":
848 bb.note("Writing buildhistory")
849 bb.build.exec_func("buildhistory_write_sigs", d)
850 import time
851 start=time.time()
852 localdata = bb.data.createCopy(e.data)
853 localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
854 interrupted = getattr(e, '_interrupted', 0)
855 localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
856 bb.build.exec_func("buildhistory_commit", localdata)
857 stop=time.time()
858 bb.note("Writing buildhistory took: %s seconds" % round(stop-start))
859 else:
860 bb.note("No commit since BUILDHISTORY_COMMIT != '1'")
861}
862
863addhandler buildhistory_eventhandler
864buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted bb.event.BuildStarted"
865
866
867# FIXME this ought to be moved into the fetcher
868def _get_srcrev_values(d):
869 """
870    Return the source revision strings for the current recipe
871 """
872
873 scms = []
874 fetcher = bb.fetch.Fetch(d.getVar('SRC_URI').split(), d)
875 urldata = fetcher.ud
876 for u in urldata:
877 if urldata[u].method.supports_srcrev():
878 scms.append(u)
879
880 dict_srcrevs = {}
881 dict_tag_srcrevs = {}
882 for scm in scms:
883 ud = urldata[scm]
884 autoinc, rev = ud.method.sortable_revision(ud, d, ud.name)
885 dict_srcrevs[ud.name] = rev
886 if 'tag' in ud.parm:
887            tag = ud.parm['tag']
888 key = ud.name+'_'+tag
889 dict_tag_srcrevs[key] = rev
890 return (dict_srcrevs, dict_tag_srcrevs)
891
892do_fetch[postfuncs] += "write_srcrev"
893do_fetch[vardepsexclude] += "write_srcrev"
894python write_srcrev() {
895 write_latest_srcrev(d, d.getVar('BUILDHISTORY_DIR_PACKAGE'))
896}
897
898def write_latest_srcrev(d, pkghistdir):
899 srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
900
901 srcrevs, tag_srcrevs = _get_srcrev_values(d)
902 if srcrevs:
903 if not os.path.exists(pkghistdir):
904 bb.utils.mkdirhier(pkghistdir)
905 old_tag_srcrevs = {}
906 if os.path.exists(srcrevfile):
907 with open(srcrevfile) as f:
908 for line in f:
909 if line.startswith('# tag_'):
910 key, value = line.split("=", 1)
911 key = key.replace('# tag_', '').strip()
912 value = value.replace('"', '').strip()
913 old_tag_srcrevs[key] = value
914 with open(srcrevfile, 'w') as f:
915 for name, srcrev in sorted(srcrevs.items()):
916 suffix = "_" + name
917 if name == "default":
918 suffix = ""
919 orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
920 if orig_srcrev:
921 f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
922 f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
923 for name, srcrev in sorted(tag_srcrevs.items()):
924 f.write('# tag_%s = "%s"\n' % (name, srcrev))
925 if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
926 pkg = d.getVar('PN')
927 bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
928
929 else:
930 if os.path.exists(srcrevfile):
931 os.remove(srcrevfile)
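# A sketch of the latest_srcrev file this writes (revisions hypothetical):
#   # SRCREV = "${AUTOREV}"
#   SRCREV = "3f79bb7b435b05321651daefd374cdc681dc06fa"
#   # tag_default_v1.0 = "3f79bb7b435b05321651daefd374cdc681dc06fa"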
932
933do_testimage[postfuncs] += "write_ptest_result"
934do_testimage[vardepsexclude] += "write_ptest_result"
935
936python write_ptest_result() {
937 write_latest_ptest_result(d, d.getVar('BUILDHISTORY_DIR'))
938}
939
940def write_latest_ptest_result(d, histdir):
941 import glob
942 import subprocess
943 test_log_dir = d.getVar('TEST_LOG_DIR')
944 input_ptest = os.path.join(test_log_dir, 'ptest_log')
945 output_ptest = os.path.join(histdir, 'ptest')
946 if os.path.exists(input_ptest):
947 try:
948            # Lock it to avoid race issues
949 lock = bb.utils.lockfile(output_ptest + "/ptest.lock")
950 bb.utils.mkdirhier(output_ptest)
951 oe.path.copytree(input_ptest, output_ptest)
952            # Sort test results
953 for result in glob.glob('%s/pass.fail.*' % output_ptest):
954 bb.debug(1, 'Processing %s' % result)
955 cmd = ['sort', result, '-o', result]
956 bb.debug(1, 'Running %s' % cmd)
957 ret = subprocess.call(cmd)
958 if ret != 0:
959 bb.error('Failed to run %s!' % cmd)
960 finally:
961 bb.utils.unlockfile(lock)
diff --git a/meta/classes/buildstats-summary.bbclass b/meta/classes/buildstats-summary.bbclass
deleted file mode 100644
index 12e8f17836..0000000000
--- a/meta/classes/buildstats-summary.bbclass
+++ /dev/null
@@ -1,46 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Summarize sstate usage at the end of the build
8python buildstats_summary () {
9 import collections
10 import os.path
11
12 bsdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}")
13 if not os.path.exists(bsdir):
14 return
15
16 sstatetasks = (e.data.getVar('SSTATETASKS') or '').split()
17 built = collections.defaultdict(lambda: [set(), set()])
18 for pf in os.listdir(bsdir):
19 taskdir = os.path.join(bsdir, pf)
20 if not os.path.isdir(taskdir):
21 continue
22
23 tasks = os.listdir(taskdir)
24 for t in sstatetasks:
25 no_sstate, sstate = built[t]
26 if t in tasks:
27 no_sstate.add(pf)
28 elif t + '_setscene' in tasks:
29 sstate.add(pf)
30
31 header_printed = False
32 for t in sstatetasks:
33 no_sstate, sstate = built[t]
34 if no_sstate | sstate:
35 if not header_printed:
36 header_printed = True
37 bb.note("Build completion summary:")
38
39 sstate_count = len(sstate)
40 no_sstate_count = len(no_sstate)
41 total_count = sstate_count + no_sstate_count
42            bb.note("    {0}: {1:.1f}% sstate reuse ({2} setscene, {3} scratch)".format(
43 t, round(100 * sstate_count / total_count, 1), sstate_count, no_sstate_count))
44}
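# Example of the summary this prints (numbers hypothetical):
#   Build completion summary:
#     do_populate_sysroot: 95.2% sstate reuse (120 setscene, 6 scratch)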
45addhandler buildstats_summary
46buildstats_summary[eventmask] = "bb.event.BuildCompleted"
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
deleted file mode 100644
index f6bd972ff4..0000000000
--- a/meta/classes/ccache.bbclass
+++ /dev/null
@@ -1,74 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Usage:
9# - Enable ccache
10# Add the following line to a conffile such as conf/local.conf:
11# INHERIT += "ccache"
12#
13# - Disable ccache for a recipe
14# Add the following line to the recipe if it can't be built with ccache:
15# CCACHE_DISABLE = "1"
16#
17# - Share ccache files between different builds
18# Set CCACHE_TOP_DIR to a shared dir
19# CCACHE_TOP_DIR = "/path/to/shared_ccache/"
20#
21# - To debug ccache
22# export CCACHE_DEBUG = "1"
23# export CCACHE_LOGFILE = "${CCACHE_DIR}/logfile.log"
24# Also set PARALLEL_MAKE = "-j 1" to keep the make log in order
25#
26# By default this class will only use ccache for target builds, and build
27# our own ccache-native. It is possible to use a host-provided ccache that
28# can then be used by native recipes too by setting:
29# ASSUME_PROVIDED += "ccache-native"
30# HOSTTOOLS += "ccache"
31
32# Set this to a shared location so that cache files can be shared between
33# different builds.
34CCACHE_TOP_DIR ?= "${TMPDIR}/ccache"
35
36# ccache-native and cmake-native have a circular dependency
37# that affects other native recipes, but not all.
38# This allows ccache to be used in the specified native recipes.
39CCACHE_NATIVE_RECIPES_ALLOWED ?= ""
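# For example (recipe name illustrative), in local.conf:
#   CCACHE_NATIVE_RECIPES_ALLOWED = "quilt-native"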
40
41# ccache removes CCACHE_BASEDIR from file paths, so that hashes will be the same
42# in different builds.
43export CCACHE_BASEDIR ?= "${TMPDIR}"
44
45export CCACHE_CONFIGPATH ?= "${COREBASE}/meta/conf/ccache.conf"
46
47export CCACHE_DIR ?= "${CCACHE_TOP_DIR}/${MULTIMACH_TARGET_SYS}/${PN}"
48
49python() {
50 """
51 Enable ccache for the recipe
52 """
53 pn = d.getVar('PN')
54 if (not bb.utils.to_boolean(d.getVar('CCACHE_DISABLE')) and
55 ("ccache" in d.getVar("HOSTTOOLS").split() or
56 pn in d.getVar('CCACHE_NATIVE_RECIPES_ALLOWED') or
57 not bb.data.inherits_class("native", d))):
58 d.appendVar('DEPENDS', ' ccache-native')
59 d.setVar('CCACHE', 'ccache ')
60}
61
62addtask cleanccache after do_clean
63python do_cleanccache() {
64 import shutil
65
66 ccache_dir = d.getVar('CCACHE_DIR')
67 if os.path.exists(ccache_dir):
68 bb.note("Removing %s" % ccache_dir)
69 shutil.rmtree(ccache_dir)
70 else:
71 bb.note("%s doesn't exist" % ccache_dir)
72}
73addtask cleanall after do_cleanccache
74do_cleanccache[nostamp] = "1"
diff --git a/meta/classes/ccmake.bbclass b/meta/classes/ccmake.bbclass
deleted file mode 100644
index c5b4bf6260..0000000000
--- a/meta/classes/ccmake.bbclass
+++ /dev/null
@@ -1,103 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit terminal
8
9python do_ccmake() {
10 import shutil
11
12 # copy current config for diffing
13 config = os.path.join(d.getVar("B"), "CMakeCache.txt")
14 if os.path.exists(config):
15 shutil.copy(config, config + ".orig")
16
17 oe_terminal(d.expand("ccmake ${OECMAKE_GENERATOR_ARGS} ${OECMAKE_SOURCEPATH} -Wno-dev"),
18 d.getVar("PN") + " - ccmake", d)
19
20 if os.path.exists(config) and os.path.exists(config + ".orig"):
21 if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
22 # the cmake class uses cmake --build, which will by default
23 # regenerate configuration, simply mark the compile step as tainted
24 # to ensure it is re-run
25 bb.note("Configuration changed, recompile will be forced")
26 bb.build.write_taint('do_compile', d)
27
28}
29do_ccmake[depends] += "cmake-native:do_populate_sysroot"
30do_ccmake[nostamp] = "1"
31do_ccmake[dirs] = "${B}"
32addtask ccmake after do_configure
33
34def cmake_parse_config_cache(path):
35 with open(path, "r") as f:
36 for i in f:
37 i = i.rstrip("\n")
38 if len(i) == 0 or i.startswith("//") or i.startswith("#"):
39 continue # empty or comment
40 key, value = i.split("=", 1)
41 key, keytype = key.split(":")
42 if keytype in ["INTERNAL", "STATIC"]:
43 continue # skip internal and static config options
44 yield key, keytype, value
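# For reference, a non-internal CMakeCache.txt entry looks like (value
# hypothetical):
#   CMAKE_BUILD_TYPE:STRING=Release
# which the generator above yields as ("CMAKE_BUILD_TYPE", "STRING", "Release").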
45
46def cmake_diff_config_vars(a, b):
47 removed, added = [], []
48
49 for ak, akt, av in a:
50 found = False
51 for bk, bkt, bv in b:
52 if bk == ak:
53 found = True
54 if bkt != akt or bv != av: # changed
55 removed.append((ak, akt, av))
56 added.append((bk, bkt, bv))
57 break
58 # remove any missing from b
59 if not found:
60 removed.append((ak, akt, av))
61
62 # add any missing from a
63 for bk, bkt, bv in b:
64 if not any(bk == ak for ak, akt, av in a):
65 added.append((bk, bkt, bv))
66
67 return removed, added
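# As a sketch: diffing a = [("FOO", "BOOL", "ON")] against b = [("FOO", "BOOL", "OFF")]
# returns removed = [("FOO", "BOOL", "ON")] and added = [("FOO", "BOOL", "OFF")];
# entries present only in b show up in added alone.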
68
69python do_ccmake_diffconfig() {
70 import shutil
71 config = os.path.join(d.getVar("B"), "CMakeCache.txt")
72 if os.path.exists(config) and os.path.exists(config + ".orig"):
73 if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
74 # scan the changed options
75 old = list(cmake_parse_config_cache(config + ".orig"))
76 new = list(cmake_parse_config_cache(config))
77 _, added = cmake_diff_config_vars(old, new)
78
79 if len(added) != 0:
80 with open(d.expand("${WORKDIR}/configuration.inc"), "w") as f:
81 f.write("EXTRA_OECMAKE += \" \\\n")
82 for k, kt, v in added:
83 escaped = v if " " not in v else "\"{0}\"".format(v)
84 f.write(" -D{0}:{1}={2} \\\n".format(k, kt, escaped))
85 f.write(" \"\n")
86 bb.plain("Configuration recipe fragment written to: {0}".format(d.expand("${WORKDIR}/configuration.inc")))
87
88 with open(d.expand("${WORKDIR}/site-file.cmake"), "w") as f:
89 for k, kt, v in added:
90 f.write("SET({0} \"{1}\" CACHE {2} \"\")\n".format(k, v, kt))
91 bb.plain("Configuration cmake fragment written to: {0}".format(d.expand("${WORKDIR}/site-file.cmake")))
92
93 # restore the original config
94 shutil.copy(config + ".orig", config)
95 else:
96 bb.plain("No configuration differences, skipping configuration fragment generation.")
97 else:
98 bb.fatal("No config files found. Did you run ccmake?")
99}
100do_ccmake_diffconfig[nostamp] = "1"
101do_ccmake_diffconfig[dirs] = "${B}"
102addtask ccmake_diffconfig
103
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
deleted file mode 100644
index 16729dcf61..0000000000
--- a/meta/classes/chrpath.bbclass
+++ /dev/null
@@ -1,140 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7CHRPATH_BIN ?= "chrpath"
8PREPROCESS_RELOCATE_DIRS ?= ""
9
10def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
11 import subprocess, oe.qa
12
13 with oe.qa.ELFFile(fpath) as elf:
14 try:
15 elf.open()
16 except oe.qa.NotELFFileError:
17 return
18
19 try:
20 out = subprocess.check_output([cmd, "-l", fpath], universal_newlines=True)
21 except subprocess.CalledProcessError:
22 return
23
24 # Handle RUNPATH as well as RPATH
25 out = out.replace("RUNPATH=","RPATH=")
26 # Throw away everything other than the rpath list
27 curr_rpath = out.partition("RPATH=")[2]
28 #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
29 rpaths = curr_rpath.strip().split(":")
30 new_rpaths = []
31 modified = False
32 for rpath in rpaths:
33 # If rpath is already dynamic copy it to new_rpath and continue
34 if rpath.find("$ORIGIN") != -1:
35 new_rpaths.append(rpath)
36 continue
37 rpath = os.path.normpath(rpath)
38 if baseprefix not in rpath and tmpdir not in rpath:
39 # Skip standard search paths
40            if rpath in ['/lib', '/usr/lib', '/lib64', '/usr/lib64']:
41                bb.warn("Skipping RPATH %s as it is a standard search path for %s" % (rpath, fpath))
42 modified = True
43 continue
44 new_rpaths.append(rpath)
45 continue
46 new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/"))))
47 modified = True
48
49 # if we have modified some rpaths call chrpath to update the binary
50 if modified:
51 if break_hardlinks:
52 bb.utils.break_hardlinks(fpath)
53
54 args = ":".join(new_rpaths)
55 #bb.note("Setting rpath for %s to %s" %(fpath, args))
56 try:
57 subprocess.check_output([cmd, "-r", args, fpath],
58 stderr=subprocess.PIPE, universal_newlines=True)
59 except subprocess.CalledProcessError as e:
60 bb.fatal("chrpath command failed with exit code %d:\n%s\n%s" % (e.returncode, e.stdout, e.stderr))
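# As a rough illustration: an RPATH that points into the build tree is rewritten
# to a "$ORIGIN/..."-relative path (e.g. "$ORIGIN/../lib", path hypothetical)
# computed from the binary's own install location, keeping it relocatable.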
61
62def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
63 import subprocess as sub
64
65 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE, text=True)
66 out, err = p.communicate()
67    # Only process stdout for results if otool returned successfully
68 if p.returncode != 0:
69 return
70 for l in out.split("\n"):
71 if "(compatibility" not in l:
72 continue
73 rpath = l.partition("(compatibility")[0].strip()
74 if baseprefix not in rpath:
75 continue
76
77 if break_hardlinks:
78 bb.utils.break_hardlinks(fpath)
79
80 newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
81 p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
82 out, err = p.communicate()
83
84def process_dir(rootdir, directory, d, break_hardlinks = False):
85 bb.debug(2, "Checking %s for binaries to process" % directory)
86 if not os.path.exists(directory):
87 return
88
89 import stat
90
91 rootdir = os.path.normpath(rootdir)
92 cmd = d.expand('${CHRPATH_BIN}')
93 tmpdir = os.path.normpath(d.getVar('TMPDIR', False))
94 baseprefix = os.path.normpath(d.expand('${base_prefix}'))
95 hostos = d.getVar("HOST_OS")
96
97 if "linux" in hostos:
98 process_file = process_file_linux
99 elif "darwin" in hostos:
100 process_file = process_file_darwin
101 else:
102 # Relocations not supported
103 return
104
105 dirs = os.listdir(directory)
106 for file in dirs:
107 fpath = directory + "/" + file
108 fpath = os.path.normpath(fpath)
109 if os.path.islink(fpath):
110 # Skip symlinks
111 continue
112
113 if os.path.isdir(fpath):
114 process_dir(rootdir, fpath, d, break_hardlinks = break_hardlinks)
115 else:
116 #bb.note("Testing %s for relocatability" % fpath)
117
118            # We need read and write permissions for chrpath; if we don't
119            # have them, set them temporarily. Take a copy of the file's
120            # permissions so that we can restore them afterwards.
121 perms = os.stat(fpath)[stat.ST_MODE]
122 if os.access(fpath, os.W_OK|os.R_OK):
123 perms = None
124 else:
125 # Temporarily make the file writeable so we can chrpath it
126 os.chmod(fpath, perms|stat.S_IRWXU)
127
128 process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = break_hardlinks)
129
130 if perms:
131 os.chmod(fpath, perms)
132
133def rpath_replace (path, d):
134 bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
135
136 for bindir in bindirs:
137 #bb.note ("Processing directory " + bindir)
138 directory = path + "/" + bindir
139 process_dir (path, directory, d)
140
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
deleted file mode 100644
index 9ff9956fe9..0000000000
--- a/meta/classes/copyleft_compliance.bbclass
+++ /dev/null
@@ -1,70 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Deploy sources for recipes for compliance with copyleft-style licenses
8# Defaults to using symlinks, as it's a quick operation, and one can easily
9# follow the links when making use of the files (e.g. tar with the -h arg).
10#
11# vi:sts=4:sw=4:et
12
13inherit copyleft_filter
14
15COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
16
17python do_prepare_copyleft_sources () {
18 """Populate a tree of the recipe sources and emit patch series files"""
19 import os.path
20 import shutil
21
22 p = d.getVar('P')
23 included, reason = copyleft_should_include(d)
24 if not included:
25 bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
26 return
27 else:
28 bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
29
30 sources_dir = d.getVar('COPYLEFT_SOURCES_DIR')
31 dl_dir = d.getVar('DL_DIR')
32 src_uri = d.getVar('SRC_URI').split()
33 fetch = bb.fetch2.Fetch(src_uri, d)
34 ud = fetch.ud
35
36 pf = d.getVar('PF')
37 dest = os.path.join(sources_dir, pf)
38 shutil.rmtree(dest, ignore_errors=True)
39 bb.utils.mkdirhier(dest)
40
41 for u in ud.values():
42 local = os.path.normpath(fetch.localpath(u.url))
43 if local.endswith('.bb'):
44 continue
45 elif local.endswith('/'):
46 local = local[:-1]
47
48 if u.mirrortarball:
49 tarball_path = os.path.join(dl_dir, u.mirrortarball)
50 if os.path.exists(tarball_path):
51 local = tarball_path
52
53 oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True)
54
55 patches = src_patches(d)
56 for patch in patches:
57 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
58 patchdir = parm.get('patchdir')
59 if patchdir:
60 series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
61 else:
62 series = os.path.join(dest, 'series')
63
64 with open(series, 'a') as s:
65 s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
66}
67
68addtask prepare_copyleft_sources after do_fetch before do_build
69do_prepare_copyleft_sources[dirs] = "${WORKDIR}"
70do_build[recrdeptask] += 'do_prepare_copyleft_sources'
diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass
deleted file mode 100644
index 83cd90060d..0000000000
--- a/meta/classes/copyleft_filter.bbclass
+++ /dev/null
@@ -1,83 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Filter by license: copyleft_should_include returns True for recipes whose
8# licenses match COPYLEFT_LICENSE_INCLUDE, and False for those matching
9# COPYLEFT_LICENSE_EXCLUDE.
10#
11# By default, includes all GPL, LGPL and AGPL, and excludes CLOSED and Proprietary.
12
13COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL* AGPL*'
14COPYLEFT_LICENSE_INCLUDE[type] = 'list'
15COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
16
17COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
18COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
19COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
20
21COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
22COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
23
24COPYLEFT_RECIPE_TYPES ?= 'target'
25COPYLEFT_RECIPE_TYPES[type] = 'list'
26COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
27
28COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
29COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
30COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
31
32COPYLEFT_PN_INCLUDE ?= ''
33COPYLEFT_PN_INCLUDE[type] = 'list'
34COPYLEFT_PN_INCLUDE[doc] = 'Space separated list of recipe names to include'
35
36COPYLEFT_PN_EXCLUDE ?= ''
37COPYLEFT_PN_EXCLUDE[type] = 'list'
38COPYLEFT_PN_EXCLUDE[doc] = 'Space separated list of recipe names to exclude'
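# A minimal local.conf sketch (values illustrative, not the defaults):
#   COPYLEFT_LICENSE_INCLUDE = "GPL* LGPL*"
#   COPYLEFT_RECIPE_TYPES = "target native"
#   COPYLEFT_PN_EXCLUDE = "linux-yocto"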
39
40def copyleft_recipe_type(d):
41 for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
42 if oe.utils.inherits(d, recipe_type):
43 return recipe_type
44 return 'target'
45
46def copyleft_should_include(d):
47 """
48 Determine if this recipe's sources should be deployed for compliance
49 """
50 import ast
51 import oe.license
52 from fnmatch import fnmatchcase as fnmatch
53
54 recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE')
55 if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
56 included, motive = False, 'recipe type "%s" is excluded' % recipe_type
57 else:
58 included, motive = False, 'recipe did not match anything'
59
60 include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
61 exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
62
63 try:
64 is_included, reason = oe.license.is_included(d.getVar('LICENSE'), include, exclude)
65 except oe.license.LicenseError as exc:
66 bb.fatal('%s: %s' % (d.getVar('PF'), exc))
67 else:
68 if is_included:
69 if reason:
70 included, motive = True, 'recipe has included licenses: %s' % ', '.join(reason)
71 else:
72 included, motive = False, 'recipe does not include a copyleft license'
73 else:
74 included, motive = False, 'recipe has excluded licenses: %s' % ', '.join(reason)
75
76 if any(fnmatch(d.getVar('PN'), name) \
77 for name in oe.data.typed_value('COPYLEFT_PN_INCLUDE', d)):
78 included, motive = True, 'recipe included by name'
79 if any(fnmatch(d.getVar('PN'), name) \
80 for name in oe.data.typed_value('COPYLEFT_PN_EXCLUDE', d)):
81 included, motive = False, 'recipe excluded by name'
82
83 return included, motive
diff --git a/meta/classes/create-spdx-2.2.bbclass b/meta/classes/create-spdx-2.2.bbclass
deleted file mode 100644
index 94e0108815..0000000000
--- a/meta/classes/create-spdx-2.2.bbclass
+++ /dev/null
@@ -1,970 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6
7inherit spdx-common
8
9SPDX_VERSION = "2.2"
10
11SPDX_ORG ??= "OpenEmbedded ()"
12SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
13SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
14    this recipe. For SPDX documents created using this class during the build, this \
15 is the contact information for the person or organization who is doing the \
16 build."
17
18SPDX_ARCHIVE_SOURCES ??= "0"
19SPDX_ARCHIVE_PACKAGED ??= "0"
20
21def get_namespace(d, name):
22 import uuid
23 namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
24 return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), name, str(uuid.uuid5(namespace_uuid, name)))
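# e.g. get_namespace(d, "recipe-busybox") returns a URI of the form
# (UUID illustrative):
#   ${SPDX_NAMESPACE_PREFIX}/recipe-busybox-<uuid5 of the name>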
25
26SPDX_PACKAGE_VERSION ??= "${PV}"
27SPDX_PACKAGE_VERSION[doc] = "The version of a package; used as the versionInfo field for the recipe, package and image"
28
29def create_annotation(d, comment):
30 from datetime import datetime, timezone
31
32 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
33 annotation = oe.spdx.SPDXAnnotation()
34 annotation.annotationDate = creation_time
35 annotation.annotationType = "OTHER"
36 annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
37 annotation.comment = comment
38 return annotation
39
40def recipe_spdx_is_native(d, recipe):
41 return any(a.annotationType == "OTHER" and
42 a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
43 a.comment == "isNative" for a in recipe.annotations)
44
45def get_json_indent(d):
46 if d.getVar("SPDX_PRETTY") == "1":
47 return 2
48 return None
49
50
51def convert_license_to_spdx(lic, license_data, document, d, existing={}):
52 from pathlib import Path
53 import oe.spdx
54
55 extracted = {}
56
57 def add_extracted_license(ident, name):
58 nonlocal document
59
60 if name in extracted:
61 return
62
63 extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
64 extracted_info.name = name
65 extracted_info.licenseId = ident
66 extracted_info.extractedText = None
67
68 if name == "PD":
69 # Special-case this.
70 extracted_info.extractedText = "Software released to the public domain"
71 else:
72            # Search for the license in COMMON_LICENSE_DIR and LICENSE_PATH
73 for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
74 try:
75 with (Path(directory) / name).open(errors="replace") as f:
76 extracted_info.extractedText = f.read()
77 break
78 except FileNotFoundError:
79 pass
80 if extracted_info.extractedText is None:
81 # If it's not SPDX or PD, then NO_GENERIC_LICENSE must be set
82 entry = d.getVarFlag('NO_GENERIC_LICENSE', name).split(';')
83 filename = entry[0]
84 params = {i.split('=')[0]: i.split('=')[1] for i in entry[1:] if '=' in i}
85 beginline = int(params.get('beginline', 1))
86 endline = params.get('endline', None)
87 if endline:
88 endline = int(endline)
89 if filename:
90 filename = d.expand("${S}/" + filename)
91 with open(filename, errors="replace") as f:
92 extracted_info.extractedText = "".join(line for idx, line in enumerate(f, 1) if beginline <= idx and idx <= (endline or idx))
93 else:
94 bb.fatal("Cannot find any text for license %s" % name)
95
96 extracted[name] = extracted_info
97 document.hasExtractedLicensingInfos.append(extracted_info)
98
99 def convert(l):
100 if l == "(" or l == ")":
101 return l
102
103 if l == "&":
104 return "AND"
105
106 if l == "|":
107 return "OR"
108
109 if l == "CLOSED":
110 return "NONE"
111
112 spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
113 if spdx_license in license_data["licenses"]:
114 return spdx_license
115
116 try:
117 spdx_license = existing[l]
118 except KeyError:
119 spdx_license = "LicenseRef-" + l
120 add_extracted_license(spdx_license, l)
121
122 return spdx_license
123
124 lic_split = lic.replace("(", " ( ").replace(")", " ) ").replace("|", " | ").replace("&", " & ").split()
125
126 return ' '.join(convert(l) for l in lic_split)
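# For example, a LICENSE value of "GPL-2.0-only & (MIT | BSD-3-Clause)" converts
# to the SPDX expression "GPL-2.0-only AND (MIT OR BSD-3-Clause)"; names not in
# the SPDX license list become "LicenseRef-<name>" with their license text
# attached as extracted licensing info.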
127
128def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
129 from pathlib import Path
130 import oe.spdx
131 import oe.spdx_common
132 import hashlib
133
134 source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
135 if source_date_epoch:
136 source_date_epoch = int(source_date_epoch)
137
138 sha1s = []
139 spdx_files = []
140
141 file_counter = 1
142
143 check_compiled_sources = d.getVar("SPDX_INCLUDE_COMPILED_SOURCES") == "1"
144 if check_compiled_sources:
145 compiled_sources, types = oe.spdx_common.get_compiled_sources(d)
146 bb.debug(1, f"Total compiled files: {len(compiled_sources)}")
147 for subdir, dirs, files in os.walk(topdir):
148 dirs[:] = [d for d in dirs if d not in ignore_dirs]
149 if subdir == str(topdir):
150 dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
151
152 for file in files:
153 filepath = Path(subdir) / file
154 filename = str(filepath.relative_to(topdir))
155
156 if not filepath.is_symlink() and filepath.is_file():
157 # Check if file is compiled
158 if check_compiled_sources:
159 if not oe.spdx_common.is_compiled_source(filename, compiled_sources, types):
160 continue
161 spdx_file = oe.spdx.SPDXFile()
162 spdx_file.SPDXID = get_spdxid(file_counter)
163 for t in get_types(filepath):
164 spdx_file.fileTypes.append(t)
165 spdx_file.fileName = filename
166
167 if archive is not None:
168 with filepath.open("rb") as f:
169 info = archive.gettarinfo(fileobj=f)
170 info.name = filename
171 info.uid = 0
172 info.gid = 0
173 info.uname = "root"
174 info.gname = "root"
175
176 if source_date_epoch is not None and info.mtime > source_date_epoch:
177 info.mtime = source_date_epoch
178
179 archive.addfile(info, f)
180
181 sha1 = bb.utils.sha1_file(filepath)
182 sha1s.append(sha1)
183 spdx_file.checksums.append(oe.spdx.SPDXChecksum(
184 algorithm="SHA1",
185 checksumValue=sha1,
186 ))
187 spdx_file.checksums.append(oe.spdx.SPDXChecksum(
188 algorithm="SHA256",
189 checksumValue=bb.utils.sha256_file(filepath),
190 ))
191
192 if "SOURCE" in spdx_file.fileTypes:
193 extracted_lics = oe.spdx_common.extract_licenses(filepath)
194 if extracted_lics:
195 spdx_file.licenseInfoInFiles = extracted_lics
196
197 doc.files.append(spdx_file)
198 doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
199 spdx_pkg.hasFiles.append(spdx_file.SPDXID)
200
201 spdx_files.append(spdx_file)
202
203 file_counter += 1
204
205 sha1s.sort()
206 verifier = hashlib.sha1()
207 for v in sha1s:
208 verifier.update(v.encode("utf-8"))
209 spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
210
211 return spdx_files
212
213
214def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
215 from pathlib import Path
216 import hashlib
217 import oe.packagedata
218 import oe.spdx
219
220 debug_search_paths = [
221 Path(d.getVar('PKGD')),
222 Path(d.getVar('STAGING_DIR_TARGET')),
223 Path(d.getVar('STAGING_DIR_NATIVE')),
224 Path(d.getVar('STAGING_KERNEL_DIR')),
225 ]
226
227 pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
228
229 if pkg_data is None:
230 return
231
232 for file_path, file_data in pkg_data["files_info"].items():
233 if not "debugsrc" in file_data:
234 continue
235
236 for pkg_file in package_files:
237 if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
238 break
239 else:
240 bb.fatal("No package file found for %s in %s; SPDX found: %s" % (str(file_path), package,
241 " ".join(p.fileName for p in package_files)))
242 continue
243
244 for debugsrc in file_data["debugsrc"]:
245 ref_id = "NOASSERTION"
246 for search in debug_search_paths:
247 if debugsrc.startswith("/usr/src/kernel"):
248 debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
249 else:
250 debugsrc_path = search / debugsrc.lstrip("/")
251 # We can only hash files below, skip directories, links, etc.
252 if not os.path.isfile(debugsrc_path):
253 continue
254
255 file_sha256 = bb.utils.sha256_file(debugsrc_path)
256
257 if file_sha256 in sources:
258 source_file = sources[file_sha256]
259
260 doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
261 if doc_ref is None:
262 doc_ref = oe.spdx.SPDXExternalDocumentRef()
263 doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
264 doc_ref.spdxDocument = source_file.doc.documentNamespace
265 doc_ref.checksum.algorithm = "SHA1"
266 doc_ref.checksum.checksumValue = source_file.doc_sha1
267 package_doc.externalDocumentRefs.append(doc_ref)
268
269 ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
270 else:
271 bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
272 break
273 else:
274 bb.debug(1, "Debug source %s not found" % debugsrc)
275
276 package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
277
278add_package_sources_from_debug[vardepsexclude] += "STAGING_KERNEL_DIR"
279
280def collect_dep_recipes(d, doc, spdx_recipe):
281 import json
282 from pathlib import Path
283 import oe.sbom
284 import oe.spdx
285 import oe.spdx_common
286
287 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
288 package_archs = d.getVar("SPDX_MULTILIB_SSTATE_ARCHS").split()
289 package_archs.reverse()
290
291 dep_recipes = []
292
293 deps = oe.spdx_common.get_spdx_deps(d)
294
295 for dep in deps:
296 # If this dependency is not calculated in the taskhash skip it.
297 # Otherwise, it can result in broken links since this task won't
298 # rebuild and see the new SPDX ID if the dependency changes
299 if not dep.in_taskhash:
300 continue
301
302 dep_recipe_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, "recipe-" + dep.pn, dep.hashfn)
303 if not dep_recipe_path:
304 bb.fatal("Cannot find any SPDX file for recipe %s, %s" % (dep.pn, dep.hashfn))
305
306 spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
307
308 for pkg in spdx_dep_doc.packages:
309 if pkg.name == dep.pn:
310 spdx_dep_recipe = pkg
311 break
312 else:
313 continue
314
315 dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
316
317 dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
318 dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
319 dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
320 dep_recipe_ref.checksum.algorithm = "SHA1"
321 dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
322
323 doc.externalDocumentRefs.append(dep_recipe_ref)
324
325 doc.add_relationship(
326 "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
327 "BUILD_DEPENDENCY_OF",
328 spdx_recipe
329 )
330
331 return dep_recipes
332
333collect_dep_recipes[vardepsexclude] = "SPDX_MULTILIB_SSTATE_ARCHS"
334
335def collect_dep_sources(d, dep_recipes):
336 import oe.sbom
337
338 sources = {}
339 for dep in dep_recipes:
340 # Don't collect sources from native recipes as they
341 # match non-native sources also.
342 if recipe_spdx_is_native(d, dep.recipe):
343 continue
344 recipe_files = set(dep.recipe.hasFiles)
345
346 for spdx_file in dep.doc.files:
347 if spdx_file.SPDXID not in recipe_files:
348 continue
349
350 if "SOURCE" in spdx_file.fileTypes:
351 for checksum in spdx_file.checksums:
352 if checksum.algorithm == "SHA256":
353 sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
354 break
355
356 return sources
357
358def add_download_packages(d, doc, recipe):
359 import os.path
360 from bb.fetch2 import decodeurl, CHECKSUM_LIST
361 import bb.process
362 import oe.spdx
363 import oe.sbom
364
365 for download_idx, src_uri in enumerate(d.getVar('SRC_URI').split()):
366 f = bb.fetch2.FetchData(src_uri, d)
367
368 package = oe.spdx.SPDXPackage()
369 package.name = "%s-source-%d" % (d.getVar("PN"), download_idx + 1)
370 package.SPDXID = oe.sbom.get_download_spdxid(d, download_idx + 1)
371
372 if f.type == "file":
373 continue
374
375 if f.method.supports_checksum(f):
376 for checksum_id in CHECKSUM_LIST:
377 if checksum_id.upper() not in oe.spdx.SPDXPackage.ALLOWED_CHECKSUMS:
378 continue
379
380 expected_checksum = getattr(f, "%s_expected" % checksum_id)
381 if expected_checksum is None:
382 continue
383
384 c = oe.spdx.SPDXChecksum()
385 c.algorithm = checksum_id.upper()
386 c.checksumValue = expected_checksum
387 package.checksums.append(c)
388
389 package.downloadLocation = oe.spdx_common.fetch_data_to_uri(f, f.name)
390 doc.packages.append(package)
391 doc.add_relationship(doc, "DESCRIBES", package)
392 # In the future, we might be able to do more fancy dependencies,
393 # but this should be sufficient for now
394 doc.add_relationship(package, "BUILD_DEPENDENCY_OF", recipe)
395
396def get_license_list_version(license_data, d):
397 # Newer versions of the SPDX license list are SemVer ("MAJOR.MINOR.MICRO"),
398 # but SPDX 2 only uses "MAJOR.MINOR".
399 return ".".join(license_data["licenseListVersion"].split(".")[:2])
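# e.g. a licenseListVersion of "3.24.0" is emitted as "3.24".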
400
401
402python do_create_spdx() {
403 from datetime import datetime, timezone
404 import oe.sbom
405 import oe.spdx
406 import oe.spdx_common
407 import uuid
408 from pathlib import Path
409 from contextlib import contextmanager
410 import oe.cve_check
411
412 license_data = oe.spdx_common.load_spdx_license_data(d)
413
414 @contextmanager
415 def optional_tarfile(name, guard, mode="w"):
416 import tarfile
417 import bb.compress.zstd
418
419 num_threads = int(d.getVar("BB_NUMBER_THREADS"))
420
421 if guard:
422 name.parent.mkdir(parents=True, exist_ok=True)
423 with bb.compress.zstd.open(name, mode=mode + "b", num_threads=num_threads) as f:
424 with tarfile.open(fileobj=f, mode=mode + "|") as tf:
425 yield tf
426 else:
427 yield None
428
429
430 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
431 spdx_workdir = Path(d.getVar("SPDXWORK"))
432 include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
433 archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
434 archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
435 pkg_arch = d.getVar("SSTATE_PKGARCH")
436
437 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
438
439 doc = oe.spdx.SPDXDocument()
440
441 doc.name = "recipe-" + d.getVar("PN")
442 doc.documentNamespace = get_namespace(d, doc.name)
443 doc.creationInfo.created = creation_time
444 doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
445 doc.creationInfo.licenseListVersion = get_license_list_version(license_data, d)
446 doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
447 doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
448 doc.creationInfo.creators.append("Person: N/A ()")
449
450 recipe = oe.spdx.SPDXPackage()
451 recipe.name = d.getVar("PN")
452 recipe.versionInfo = d.getVar("SPDX_PACKAGE_VERSION")
453 recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
454 recipe.supplier = d.getVar("SPDX_SUPPLIER")
455 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
456 recipe.annotations.append(create_annotation(d, "isNative"))
457
458 homepage = d.getVar("HOMEPAGE")
459 if homepage:
460 recipe.homepage = homepage
461
462 license = d.getVar("LICENSE")
463 if license:
464 recipe.licenseDeclared = convert_license_to_spdx(license, license_data, doc, d)
465
466 summary = d.getVar("SUMMARY")
467 if summary:
468 recipe.summary = summary
469
470 description = d.getVar("DESCRIPTION")
471 if description:
472 recipe.description = description
473
474 if d.getVar("SPDX_CUSTOM_ANNOTATION_VARS"):
475 for var in d.getVar('SPDX_CUSTOM_ANNOTATION_VARS').split():
476 recipe.annotations.append(create_annotation(d, var + "=" + d.getVar(var)))
477
478 # Some CVEs may be patched during the build process without incrementing the version number,
479 # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
480 # save the CVEs fixed by patches to source information field in the SPDX.
481 patched_cves = oe.cve_check.get_patched_cves(d)
482 patched_cves = list(patched_cves)
483 patched_cves = ' '.join(patched_cves)
484 if patched_cves:
485 recipe.sourceInfo = "CVEs fixed: " + patched_cves
486
487 cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
488 if cpe_ids:
489 for cpe_id in cpe_ids:
490 cpe = oe.spdx.SPDXExternalReference()
491 cpe.referenceCategory = "SECURITY"
492 cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
493 cpe.referenceLocator = cpe_id
494 recipe.externalRefs.append(cpe)
495
496 doc.packages.append(recipe)
497 doc.add_relationship(doc, "DESCRIBES", recipe)
498
499 add_download_packages(d, doc, recipe)
500
501 if oe.spdx_common.process_sources(d) and include_sources:
502 recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.zst")
503 with optional_tarfile(recipe_archive, archive_sources) as archive:
504 oe.spdx_common.get_patched_src(d)
505
506 add_package_files(
507 d,
508 doc,
509 recipe,
510 spdx_workdir,
511 lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
512 lambda filepath: ["SOURCE"],
513 ignore_dirs=[".git"],
514 ignore_top_level_dirs=["temp"],
515 archive=archive,
516 )
517
518 if archive is not None:
519 recipe.packageFileName = str(recipe_archive.name)
520
521 dep_recipes = collect_dep_recipes(d, doc, recipe)
522
523 doc_sha1 = oe.sbom.write_doc(d, doc, pkg_arch, "recipes", indent=get_json_indent(d))
524 dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
525
526 recipe_ref = oe.spdx.SPDXExternalDocumentRef()
527 recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
528 recipe_ref.spdxDocument = doc.documentNamespace
529 recipe_ref.checksum.algorithm = "SHA1"
530 recipe_ref.checksum.checksumValue = doc_sha1
531
532 sources = collect_dep_sources(d, dep_recipes)
533 found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
534
535 if not recipe_spdx_is_native(d, recipe):
536 bb.build.exec_func("read_subpackage_metadata", d)
537
538 pkgdest = Path(d.getVar("PKGDEST"))
539 for package in d.getVar("PACKAGES").split():
540 if not oe.packagedata.packaged(package, d):
541 continue
542
543 package_doc = oe.spdx.SPDXDocument()
544 pkg_name = d.getVar("PKG:%s" % package) or package
545 package_doc.name = pkg_name
546 package_doc.documentNamespace = get_namespace(d, package_doc.name)
547 package_doc.creationInfo.created = creation_time
548 package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
549 package_doc.creationInfo.licenseListVersion = get_license_list_version(license_data, d)
550 package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
551 package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
552 package_doc.creationInfo.creators.append("Person: N/A ()")
553 package_doc.externalDocumentRefs.append(recipe_ref)
554
555 package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
556
557 spdx_package = oe.spdx.SPDXPackage()
558
559 spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
560 spdx_package.name = pkg_name
561 spdx_package.versionInfo = d.getVar("SPDX_PACKAGE_VERSION")
562 spdx_package.licenseDeclared = convert_license_to_spdx(package_license, license_data, package_doc, d, found_licenses)
563 spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
564
565 package_doc.packages.append(spdx_package)
566
567 package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
568 package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
569
570 package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.zst")
571 with optional_tarfile(package_archive, archive_packaged) as archive:
572 package_files = add_package_files(
573 d,
574 package_doc,
575 spdx_package,
576 pkgdest / package,
577 lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
578 lambda filepath: ["BINARY"],
579 ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
580 archive=archive,
581 )
582
583 if archive is not None:
584 spdx_package.packageFileName = str(package_archive.name)
585
586 add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
587
588 oe.sbom.write_doc(d, package_doc, pkg_arch, "packages", indent=get_json_indent(d))
589}
590do_create_spdx[vardepsexclude] += "BB_NUMBER_THREADS"
591# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
592addtask do_create_spdx after do_package do_packagedata do_unpack do_collect_spdx_deps before do_populate_sdk do_build do_rm_work
593
594SSTATETASKS += "do_create_spdx"
595do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
596do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
597
598python do_create_spdx_setscene () {
599 sstate_setscene(d)
600}
601addtask do_create_spdx_setscene
602
603do_create_spdx[dirs] = "${SPDXWORK}"
604do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
605do_create_spdx[depends] += " \
606 ${PATCHDEPENDENCY} \
607 ${@create_spdx_source_deps(d)} \
608"
609
610python do_create_runtime_spdx() {
611 from datetime import datetime, timezone
612 import oe.sbom
613 import oe.spdx
614 import oe.spdx_common
615 import oe.packagedata
616 from pathlib import Path
617
618 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
619 spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
620 is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
621
622 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
623
624 license_data = oe.spdx_common.load_spdx_license_data(d)
625
626 providers = oe.spdx_common.collect_package_providers(d)
627 pkg_arch = d.getVar("SSTATE_PKGARCH")
628 package_archs = d.getVar("SPDX_MULTILIB_SSTATE_ARCHS").split()
629 package_archs.reverse()
630
631 if not is_native:
632 bb.build.exec_func("read_subpackage_metadata", d)
633
634 dep_package_cache = {}
635
636 pkgdest = Path(d.getVar("PKGDEST"))
637 for package in d.getVar("PACKAGES").split():
638 localdata = bb.data.createCopy(d)
639 pkg_name = d.getVar("PKG:%s" % package) or package
640 localdata.setVar("PKG", pkg_name)
641 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
642
643 if not oe.packagedata.packaged(package, localdata):
644 continue
645
646 pkg_spdx_path = oe.sbom.doc_path(deploy_dir_spdx, pkg_name, pkg_arch, "packages")
647
648 package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
649
650 for p in package_doc.packages:
651 if p.name == pkg_name:
652 spdx_package = p
653 break
654 else:
655 bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
656
657 runtime_doc = oe.spdx.SPDXDocument()
658 runtime_doc.name = "runtime-" + pkg_name
659 runtime_doc.documentNamespace = get_namespace(localdata, runtime_doc.name)
660 runtime_doc.creationInfo.created = creation_time
661 runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
662 runtime_doc.creationInfo.licenseListVersion = get_license_list_version(license_data, d)
663 runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
664 runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
665 runtime_doc.creationInfo.creators.append("Person: N/A ()")
666
667 package_ref = oe.spdx.SPDXExternalDocumentRef()
668 package_ref.externalDocumentId = "DocumentRef-package-" + package
669 package_ref.spdxDocument = package_doc.documentNamespace
670 package_ref.checksum.algorithm = "SHA1"
671 package_ref.checksum.checksumValue = package_doc_sha1
672
673 runtime_doc.externalDocumentRefs.append(package_ref)
674
675 runtime_doc.add_relationship(
676 runtime_doc.SPDXID,
677 "AMENDS",
678 "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
679 )
680
681 deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
682 seen_deps = set()
683 for dep, _ in deps.items():
684 if dep in seen_deps:
685 continue
686
687 if dep not in providers:
688 continue
689
690 (dep, dep_hashfn) = providers[dep]
691
692 if not oe.packagedata.packaged(dep, localdata):
693 continue
694
695 dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
696 dep_pkg = dep_pkg_data["PKG"]
697
698 if dep in dep_package_cache:
699 (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
700 else:
701 dep_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, dep_pkg, dep_hashfn)
702 if not dep_path:
703 bb.fatal("No SPDX file found for package %s, %s" % (dep_pkg, dep_hashfn))
704
705 spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
706
707 for pkg in spdx_dep_doc.packages:
708 if pkg.name == dep_pkg:
709 dep_spdx_package = pkg
710 break
711 else:
712 bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
713
714 dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
715 dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
716 dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
717 dep_package_ref.checksum.algorithm = "SHA1"
718 dep_package_ref.checksum.checksumValue = spdx_dep_sha1
719
720 dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
721
722 runtime_doc.externalDocumentRefs.append(dep_package_ref)
723
724 runtime_doc.add_relationship(
725 "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
726 "RUNTIME_DEPENDENCY_OF",
727 "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
728 )
729 seen_deps.add(dep)
730
731 oe.sbom.write_doc(d, runtime_doc, pkg_arch, "runtime", spdx_deploy, indent=get_json_indent(d))
732}
733
734do_create_runtime_spdx[vardepsexclude] += "OVERRIDES SPDX_MULTILIB_SSTATE_ARCHS"
735
736addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
737SSTATETASKS += "do_create_runtime_spdx"
738do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
739do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
740
741python do_create_runtime_spdx_setscene () {
742 sstate_setscene(d)
743}
744addtask do_create_runtime_spdx_setscene
745
746do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
747do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
748do_create_runtime_spdx[rdeptask] = "do_create_spdx"
749
750do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
751do_rootfs[cleandirs] += "${SPDXIMAGEWORK}"
752
753ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx"
754
755do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
756do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
757POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx"
758POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx"
759
760python image_combine_spdx() {
761 import os
762 import oe.sbom
763 from pathlib import Path
764 from oe.rootfs import image_list_installed_packages
765
766 image_name = d.getVar("IMAGE_NAME")
767 image_link_name = d.getVar("IMAGE_LINK_NAME")
768 imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
769 img_spdxid = oe.sbom.get_image_spdxid(image_name)
770 packages = image_list_installed_packages(d)
771
772 combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages, Path(d.getVar("SPDXIMAGEWORK")))
773
774 def make_image_link(target_path, suffix):
775 if image_link_name:
776 link = imgdeploydir / (image_link_name + suffix)
777 if link != target_path:
778 link.symlink_to(os.path.relpath(target_path, link.parent))
779
780 spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.zst")
781 make_image_link(spdx_tar_path, ".spdx.tar.zst")
782}
783
784python sdk_host_combine_spdx() {
785 sdk_combine_spdx(d, "host")
786}
787
788python sdk_target_combine_spdx() {
789 sdk_combine_spdx(d, "target")
790}
791
792def sdk_combine_spdx(d, sdk_type):
793 import oe.sbom
794 from pathlib import Path
795 from oe.sdk import sdk_list_installed_packages
796
797 sdk_name = d.getVar("TOOLCHAIN_OUTPUTNAME") + "-" + sdk_type
798 sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
799 sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
800 sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
801 combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages, Path(d.getVar('SPDXSDKWORK')))
802
803def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages, spdx_workdir):
804 import os
805 import oe.spdx
806 import oe.sbom
807 import oe.spdx_common
808 import io
809 import json
810 from datetime import timezone, datetime
811 from pathlib import Path
812 import tarfile
813 import bb.compress.zstd
814
815 license_data = oe.spdx_common.load_spdx_license_data(d)
816
817 providers = oe.spdx_common.collect_package_providers(d)
818 package_archs = d.getVar("SPDX_MULTILIB_SSTATE_ARCHS").split()
819 package_archs.reverse()
820
821 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
822 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
823 source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
824
825 doc = oe.spdx.SPDXDocument()
826 doc.name = rootfs_name
827 doc.documentNamespace = get_namespace(d, doc.name)
828 doc.creationInfo.created = creation_time
829 doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
830 doc.creationInfo.licenseListVersion = get_license_list_version(license_data, d)
831 doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
832 doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
833 doc.creationInfo.creators.append("Person: N/A ()")
834
835 image = oe.spdx.SPDXPackage()
836 image.name = d.getVar("PN")
837 image.versionInfo = d.getVar("SPDX_PACKAGE_VERSION")
838 image.SPDXID = rootfs_spdxid
839 image.supplier = d.getVar("SPDX_SUPPLIER")
840
841 doc.packages.append(image)
842
843 if packages:
844 for name in sorted(packages.keys()):
845 if name not in providers:
846 bb.fatal("Unable to find SPDX provider for '%s'" % name)
847
848 pkg_name, pkg_hashfn = providers[name]
849
850 pkg_spdx_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, pkg_name, pkg_hashfn)
851 if not pkg_spdx_path:
852 bb.fatal("No SPDX file found for package %s, %s" % (pkg_name, pkg_hashfn))
853
854 pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
855
856 for p in pkg_doc.packages:
857 if p.name == name:
858 pkg_ref = oe.spdx.SPDXExternalDocumentRef()
859 pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
860 pkg_ref.spdxDocument = pkg_doc.documentNamespace
861 pkg_ref.checksum.algorithm = "SHA1"
862 pkg_ref.checksum.checksumValue = pkg_doc_sha1
863
864 doc.externalDocumentRefs.append(pkg_ref)
865 doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
866 break
867 else:
868 bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
869
870 runtime_spdx_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, "runtime-" + name, pkg_hashfn)
871 if not runtime_spdx_path:
872 bb.fatal("No runtime SPDX document found for %s, %s" % (name, pkg_hashfn))
873
874 runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
875
876 runtime_ref = oe.spdx.SPDXExternalDocumentRef()
877 runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
878 runtime_ref.spdxDocument = runtime_doc.documentNamespace
879 runtime_ref.checksum.algorithm = "SHA1"
880 runtime_ref.checksum.checksumValue = runtime_doc_sha1
881
882 # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
883 doc.externalDocumentRefs.append(runtime_ref)
884 doc.add_relationship(
885 image,
886 "OTHER",
887 "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
888 comment="Runtime dependencies for %s" % name
889 )
890 bb.utils.mkdirhier(spdx_workdir)
891 image_spdx_path = spdx_workdir / (rootfs_name + ".spdx.json")
892
893 with image_spdx_path.open("wb") as f:
894 doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
895
896 num_threads = int(d.getVar("BB_NUMBER_THREADS"))
897
898 visited_docs = set()
899
900 index = {"documents": []}
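    # The index records, for each SPDX document embedded in the tar, its
    # file name, document namespace and SHA1 checksum, so that consumers can
    # locate a document in the archive without scanning every member.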
901
902 spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.zst")
903 with bb.compress.zstd.open(spdx_tar_path, "w", num_threads=num_threads) as f:
904 with tarfile.open(fileobj=f, mode="w|") as tar:
905 def collect_spdx_document(path):
906 nonlocal tar
907 nonlocal deploy_dir_spdx
908 nonlocal source_date_epoch
909 nonlocal index
910
911 if path in visited_docs:
912 return
913
914 visited_docs.add(path)
915
916 with path.open("rb") as f:
917 doc, sha1 = oe.sbom.read_doc(f)
918 f.seek(0)
919
920 if doc.documentNamespace in visited_docs:
921 return
922
923 bb.note("Adding SPDX document %s" % path)
924 visited_docs.add(doc.documentNamespace)
925 info = tar.gettarinfo(fileobj=f)
926
927 info.name = doc.name + ".spdx.json"
928 info.uid = 0
929 info.gid = 0
930 info.uname = "root"
931 info.gname = "root"
932
933 if source_date_epoch is not None and info.mtime > int(source_date_epoch):
934 info.mtime = int(source_date_epoch)
935
936 tar.addfile(info, f)
937
938 index["documents"].append({
939 "filename": info.name,
940 "documentNamespace": doc.documentNamespace,
941 "sha1": sha1,
942 })
943
944 for ref in doc.externalDocumentRefs:
945 ref_path = oe.sbom.doc_find_by_namespace(deploy_dir_spdx, package_archs, ref.spdxDocument)
946 if not ref_path:
947 bb.fatal("Cannot find any SPDX file for document %s" % ref.spdxDocument)
948 collect_spdx_document(ref_path)
949
950 collect_spdx_document(image_spdx_path)
951
952 index["documents"].sort(key=lambda x: x["filename"])
953
954 index_str = io.BytesIO(json.dumps(
955 index,
956 sort_keys=True,
957 indent=get_json_indent(d),
958 ).encode("utf-8"))
959
960 info = tarfile.TarInfo()
961 info.name = "index.json"
962 info.size = len(index_str.getvalue())
963 info.uid = 0
964 info.gid = 0
965 info.uname = "root"
966 info.gname = "root"
967
968 tar.addfile(info, fileobj=index_str)
969
970combine_spdx[vardepsexclude] += "BB_NUMBER_THREADS SPDX_MULTILIB_SSTATE_ARCHS"
diff --git a/meta/classes/create-spdx-3.0.bbclass b/meta/classes/create-spdx-3.0.bbclass
deleted file mode 100644
index a6d2d44e34..0000000000
--- a/meta/classes/create-spdx-3.0.bbclass
+++ /dev/null
@@ -1,206 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6
7inherit spdx-common
8
9SPDX_VERSION = "3.0.1"
10
11# The list of SPDX profiles that generated documents will conform to
12SPDX_PROFILES ?= "core build software simpleLicensing security"
13
14SPDX_INCLUDE_BUILD_VARIABLES ??= "0"
15SPDX_INCLUDE_BUILD_VARIABLES[doc] = "If set to '1', the bitbake variables for a \
16 recipe will be included in the Build object. This will most likely result \
17 in non-reproducible SPDX output"
18
19SPDX_INCLUDE_BITBAKE_PARENT_BUILD ??= "0"
20SPDX_INCLUDE_BITBAKE_PARENT_BUILD[doc] = "Report the parent invocation of bitbake \
21 for each Build object. This allows you to know who invoked bitbake to perform \
22 a build, but will result in non-reproducible SPDX output."
23
24SPDX_PACKAGE_ADDITIONAL_PURPOSE ?= ""
25SPDX_PACKAGE_ADDITIONAL_PURPOSE[doc] = "The list of additional purposes to assign to \
26 the generated packages for a recipe. The primary purpose is always `install`. \
27    Package overrides are allowed to override the additional purposes for \
28 individual packages."
29
30SPDX_IMAGE_PURPOSE ?= "filesystemImage"
31SPDX_IMAGE_PURPOSE[doc] = "The list of purposes to assign to the generated images. \
32 The first listed item will be the Primary Purpose and all additional items will \
33 be added as additional purposes"
34
35SPDX_SDK_PURPOSE ?= "install"
36SPDX_SDK_PURPOSE[doc] = "The list of purposes to assign to the generated SDK installer. \
37 The first listed item will be the Primary Purpose and all additional items will \
38 be added as additional purposes"
39
40SPDX_INCLUDE_VEX ??= "current"
41SPDX_INCLUDE_VEX[doc] = "Controls what VEX information is in the output. Set to \
42 'none' to disable all VEX data. Set to 'current' to only include VEX data \
43 for vulnerabilities not already fixed in the upstream source code \
44 (recommended). Set to 'all' to get all known historical vulnerabilities, \
45 including those already fixed upstream (warning: This can be large and \
46 slow)."
47
48SPDX_INCLUDE_TIMESTAMPS ?= "0"
49SPDX_INCLUDE_TIMESTAMPS[doc] = "Include time stamps in SPDX output. This is \
50 useful if you want to know when artifacts were produced and when builds \
51 occurred, but will result in non-reproducible SPDX output"
52
53SPDX_IMPORTS ??= ""
54SPDX_IMPORTS[doc] = "SPDX_IMPORTS is the base variable that describes how to \
55 reference external SPDX ids. Each import is defined as a key in this \
56    variable; the key is used as a suffix on the variables below to look up \
57    more information about the import. Each key can have the following variables: \
58    SPDX_IMPORTS_<key>_spdxid: The fully qualified SPDX ID of the object \
59 SPDX_IMPORTS_<key>_uri: The URI where the SPDX Document that contains \
60 the external object can be found. Optional but recommended \
61    SPDX_IMPORTS_<key>_hash_<hash>: The checksum of the SPDX Document that \
62    contains the External ID. <hash> must be one of the valid SPDX hashing \
63    algorithms, as described by the HashAlgorithm vocabulary in the \
64 SPDX 3 spec. Optional but recommended"
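#
# A hypothetical illustration of how these pieces fit together (the key name
# "myagent" and all values below are invented for the example):
#
# SPDX_IMPORTS += "myagent"
# SPDX_IMPORTS_myagent_spdxid = "https://spdx.example.com/doc#SPDXRef-Agent"
# SPDX_IMPORTS_myagent_uri = "https://spdx.example.com/doc.spdx.json"
# SPDX_IMPORTS_myagent_hash_sha256 = "<sha256 of doc.spdx.json>"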
65
66# Agents
67# Bitbake variables can be used to describe an SPDX Agent that may be used
68# during the build. An Agent is specified using a set of variables which all
69# start with some common base name:
70#
71# <BASE>_name: The name of the Agent (required)
72# <BASE>_type: The type of Agent. Must be one of "person", "organization",
73# "software", or "agent" (the default if not specified)
74# <BASE>_comment: The comment for the Agent (optional)
75# <BASE>_id_<ID>: An External Identifier for the Agent. <ID> must be a valid
76# ExternalIdentifierType from the SPDX 3 spec. Commonly, an E-mail address
77# can be specified with <BASE>_id_email
78#
79# Alternatively, an Agent can be specified as an external reference by referencing a key
80# in SPDX_IMPORTS like so:
81#
82# <BASE>_import = "<key>"
83#
84# Finally, the same agent described by another set of agent variables can be
85# referenced by specifying the basename of the variable that should be
86# referenced:
87#
88# SPDX_PACKAGE_SUPPLIER_ref = "SPDX_AUTHORS_openembedded"
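#
# For example, a person Agent invoking the build could be described as
# follows (names and values are illustrative only):
#
# SPDX_INVOKED_BY_name = "Jane Developer"
# SPDX_INVOKED_BY_type = "person"
# SPDX_INVOKED_BY_id_email = "jane@example.com"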
89
90SPDX_AUTHORS ??= "openembedded"
91SPDX_AUTHORS[doc] = "A space separated list of the document authors. Each item \
92 is used to name a base variable like SPDX_AUTHORS_<AUTHOR> that \
93 describes the author."
94
95SPDX_AUTHORS_openembedded_name = "OpenEmbedded"
96SPDX_AUTHORS_openembedded_type = "organization"
97
98SPDX_BUILD_HOST[doc] = "The base variable name to describe the build host on \
99 which a build is running. Must be an SPDX_IMPORTS key. Requires \
100 SPDX_INCLUDE_BITBAKE_PARENT_BUILD. NOTE: Setting this will result in \
101 non-reproducible SPDX output"
102
103SPDX_INVOKED_BY[doc] = "The base variable name to describe the Agent that \
104 invoked the build, which builds will link to if specified. Requires \
105 SPDX_INCLUDE_BITBAKE_PARENT_BUILD. NOTE: Setting this will likely result in \
106 non-reproducible SPDX output"
107
108SPDX_ON_BEHALF_OF[doc] = "The base variable name to describe the Agent on whose \
109 behalf the invoking Agent (SPDX_INVOKED_BY) is running the build. Requires \
110 SPDX_INCLUDE_BITBAKE_PARENT_BUILD. NOTE: Setting this will likely result in \
111 non-reproducible SPDX output"
112
113SPDX_PACKAGE_SUPPLIER[doc] = "The base variable name to describe the Agent who \
114 is supplying artifacts produced by the build"
115
116SPDX_PACKAGE_VERSION ??= "${PV}"
117SPDX_PACKAGE_VERSION[doc] = "The version of a package, software_packageVersion \
118 in software_Package"
119
120SPDX_PACKAGE_URL ??= ""
121SPDX_PACKAGE_URL[doc] = "Provides a place for the SPDX data creator to record \
122the package URL string (in accordance with the Package URL specification) for \
123a software Package."
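# For example (a hypothetical purl value):
# SPDX_PACKAGE_URL = "pkg:generic/${BPN}@${PV}"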
124
125IMAGE_CLASSES:append = " create-spdx-image-3.0"
126SDK_CLASSES += "create-spdx-sdk-3.0"
127
128oe.spdx30_tasks.set_timestamp_now[vardepsexclude] = "SPDX_INCLUDE_TIMESTAMPS"
129oe.spdx30_tasks.get_package_sources_from_debug[vardepsexclude] += "STAGING_KERNEL_DIR"
130oe.spdx30_tasks.collect_dep_objsets[vardepsexclude] = "SPDX_MULTILIB_SSTATE_ARCHS"
131
132
133# SPDX library code makes heavy use of classes, for which bitbake cannot
134# easily parse out dependencies. As such, the library code files that make
135# use of classes are explicitly added as file checksum dependencies.
136SPDX3_DEP_FILES = "\
137 ${COREBASE}/meta/lib/oe/sbom30.py:True \
138 ${COREBASE}/meta/lib/oe/spdx30.py:True \
139 ${SPDX_LICENSES}:True \
140 "
141
142python do_create_spdx() {
143 import oe.spdx30_tasks
144 oe.spdx30_tasks.create_spdx(d)
145}
146do_create_spdx[vardeps] += "\
147 SPDX_INCLUDE_BITBAKE_PARENT_BUILD \
148 SPDX_PACKAGE_ADDITIONAL_PURPOSE \
149 SPDX_PROFILES \
150 SPDX_NAMESPACE_PREFIX \
151 SPDX_UUID_NAMESPACE \
152 "
153
154addtask do_create_spdx after \
155 do_collect_spdx_deps \
156 do_deploy_source_date_epoch \
157 do_populate_sysroot do_package do_packagedata \
158 before do_populate_sdk do_populate_sdk_ext do_build do_rm_work
159
160SSTATETASKS += "do_create_spdx"
161do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
162do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
163do_create_spdx[file-checksums] += "${SPDX3_DEP_FILES}"
164
165python do_create_spdx_setscene () {
166 sstate_setscene(d)
167}
168addtask do_create_spdx_setscene
169
170do_create_spdx[dirs] = "${SPDXWORK}"
171do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
172do_create_spdx[depends] += " \
173 ${PATCHDEPENDENCY} \
174 ${@create_spdx_source_deps(d)} \
175"
176
177python do_create_package_spdx() {
178 import oe.spdx30_tasks
179 oe.spdx30_tasks.create_package_spdx(d)
180}
181oe.spdx30_tasks.create_package_spdx[vardepsexclude] = "OVERRIDES"
182
183addtask do_create_package_spdx after do_create_spdx before do_build do_rm_work
184SSTATETASKS += "do_create_package_spdx"
185do_create_package_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
186do_create_package_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
187do_create_package_spdx[file-checksums] += "${SPDX3_DEP_FILES}"
188
189python do_create_package_spdx_setscene () {
190 sstate_setscene(d)
191}
192addtask do_create_package_spdx_setscene
193
194do_create_package_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
195do_create_package_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
196do_create_package_spdx[rdeptask] = "do_create_spdx"
197
198python spdx30_build_started_handler () {
199 import oe.spdx30_tasks
200 d = e.data.createCopy()
201 oe.spdx30_tasks.write_bitbake_spdx(d)
202}
203
204addhandler spdx30_build_started_handler
205spdx30_build_started_handler[eventmask] = "bb.event.BuildStarted"
206
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
deleted file mode 100644
index b604973ae0..0000000000
--- a/meta/classes/create-spdx.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6# Include this class when you don't care what version of SPDX you get; it will
7# be updated to the latest stable version that is supported
8inherit create-spdx-3.0
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
deleted file mode 100644
index c63ebd56e1..0000000000
--- a/meta/classes/cve-check.bbclass
+++ /dev/null
@@ -1,570 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is used to check recipes against public CVEs.
8#
9# In order to use this class just inherit the class in the
10# local.conf file and it will add the cve_check task for
11# every recipe. The task can be used per recipe, per image,
12# or using the special cases "world" and "universe". The
13# cve_check task will print a warning for every unpatched
14# CVE found and generate a file in the recipe WORKDIR/cve
15# directory. If an image is built, it will generate a report
16# in DEPLOY_DIR_IMAGE for all the packages used.
17#
18# Example:
19# bitbake -c cve_check openssl
20# bitbake core-image-sato
21# bitbake -k -c cve_check universe
22#
23# DISCLAIMER
24#
25# This class/tool is meant to be used as support and not
26# the only method to check against CVEs. Running this tool
27# doesn't guarantee your packages are free of CVEs.
28
29# The product name that the CVE database uses defaults to BPN, but may need to
30# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
31CVE_PRODUCT ??= "${BPN}"
32CVE_VERSION ??= "${PV}"
33
34# Possible database sources: NVD1, NVD2, FKIE
35NVD_DB_VERSION ?= "FKIE"
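# For example, to use the NVD 2.0 API feed instead of the default FKIE data
# set, add the following to local.conf:
# NVD_DB_VERSION = "NVD2"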
36
37# Use a different file name for each database source, as they synchronize at different times and so may differ slightly
38CVE_CHECK_DB_FILENAME ?= "${@'nvdcve_2-2.db' if d.getVar('NVD_DB_VERSION') == 'NVD2' else 'nvdcve_1-3.db' if d.getVar('NVD_DB_VERSION') == 'NVD1' else 'nvdfkie_1-1.db'}"
39CVE_CHECK_DB_FETCHER ?= "${@'cve-update-nvd2-native' if d.getVar('NVD_DB_VERSION') == 'NVD2' else 'cve-update-db-native'}"
40CVE_CHECK_DB_DIR ?= "${STAGING_DIR}/CVE_CHECK"
41CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/${CVE_CHECK_DB_FILENAME}"
42CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
43
44CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
45CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
46CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json"
47CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt"
48
49CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
50
51CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
52CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
53CVE_CHECK_MANIFEST_JSON_SUFFIX ?= "json"
54CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.${CVE_CHECK_MANIFEST_JSON_SUFFIX}"
55CVE_CHECK_COPY_FILES ??= "1"
56CVE_CHECK_CREATE_MANIFEST ??= "1"
57
58# Report Patched or Ignored CVEs
59CVE_CHECK_REPORT_PATCHED ??= "1"
60
61CVE_CHECK_SHOW_WARNINGS ??= "1"
62
63# Provide JSON output
64CVE_CHECK_FORMAT_JSON ??= "1"
65
66# Check for packages without CVEs (no issues or missing product name)
67CVE_CHECK_COVERAGE ??= "1"
68
69# Skip CVE Check for packages (PN)
70CVE_CHECK_SKIP_RECIPE ?= ""
71
72# Replace NVD DB check status for a given CVE. Each CVE has to be mentioned
73# separately with optional detail and description for this status.
74#
75# CVE_STATUS[CVE-1234-0001] = "not-applicable-platform: Issue only applies on Windows"
76# CVE_STATUS[CVE-1234-0002] = "fixed-version: Fixed externally"
77#
78# Setting the same status and reason for multiple CVEs is possible
79# via CVE_STATUS_GROUPS variable.
80#
81# CVE_STATUS_GROUPS = "CVE_STATUS_WIN CVE_STATUS_PATCHED"
82#
83# CVE_STATUS_WIN = "CVE-1234-0001 CVE-1234-0003"
84# CVE_STATUS_WIN[status] = "not-applicable-platform: Issue only applies on Windows"
85# CVE_STATUS_PATCHED = "CVE-1234-0002 CVE-1234-0004"
86# CVE_STATUS_PATCHED[status] = "fixed-version: Fixed externally"
87#
88# All possible CVE statuses can be found in cve-check-map.conf
89# CVE_CHECK_STATUSMAP[not-applicable-platform] = "Ignored"
90# CVE_CHECK_STATUSMAP[fixed-version] = "Patched"
91#
92# CVE_CHECK_IGNORE is deprecated and CVE_STATUS has to be used instead.
93# Keep CVE_CHECK_IGNORE until other layers migrate to new variables
94CVE_CHECK_IGNORE ?= ""
95
96# Layers to be excluded
97CVE_CHECK_LAYER_EXCLUDELIST ??= ""
98
99# Layers to be included
100CVE_CHECK_LAYER_INCLUDELIST ??= ""
101
102
103# Set to "alphabetical" for versions that use a single alphabetical character as the release increment
104CVE_VERSION_SUFFIX ??= ""
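# For example, a recipe whose upstream versions end in a single letter used
# as the release increment (a hypothetical "1.0.2k") could set:
# CVE_VERSION_SUFFIX = "alphabetical"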
105
106python () {
107 from oe.cve_check import extend_cve_status
108 extend_cve_status(d)
109
110 nvd_database_type = d.getVar("NVD_DB_VERSION")
111 if nvd_database_type not in ("NVD1", "NVD2", "FKIE"):
112 bb.erroronce("Malformed NVD_DB_VERSION, must be one of: NVD1, NVD2, FKIE. Defaulting to NVD2")
113 d.setVar("NVD_DB_VERSION", "NVD2")
114}
115
116def generate_json_report(d, out_path, link_path):
117 if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
118 import json
119 from oe.cve_check import cve_check_merge_jsons, update_symlinks
120
121 bb.note("Generating JSON CVE summary")
122 index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
123 summary = {"version":"1", "package": []}
124 with open(index_file) as f:
125 filename = f.readline()
126 while filename:
127 with open(filename.rstrip()) as j:
128 data = json.load(j)
129 cve_check_merge_jsons(summary, data)
130 filename = f.readline()
131
132 summary["package"].sort(key=lambda d: d['name'])
133
134 with open(out_path, "w") as f:
135 json.dump(summary, f, indent=2)
136
137 update_symlinks(out_path, link_path)
138
139python cve_save_summary_handler () {
140 import shutil
141 import datetime
142 from oe.cve_check import update_symlinks
143
144 cve_summary_name = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME")
145 cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
146 bb.utils.mkdirhier(cvelogpath)
147
148 timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
149
150 if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
151 json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
152 json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
153 generate_json_report(d, json_summary_name, json_summary_link_name)
154 bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
155}
156
157addhandler cve_save_summary_handler
158cve_save_summary_handler[eventmask] = "bb.event.BuildCompleted"
159
160python do_cve_check () {
161 """
162 Check recipe for patched and unpatched CVEs
163 """
164 from oe.cve_check import get_patched_cves
165
166 with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
167 if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
168 try:
169 patched_cves = get_patched_cves(d)
170 except FileNotFoundError:
171 bb.fatal("Failure in searching patches")
172 cve_data, status = check_cves(d, patched_cves)
173 if len(cve_data) or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
174 get_cve_info(d, cve_data)
175 cve_write_data(d, cve_data, status)
176 else:
177 bb.note("No CVE database found, skipping CVE check")
178
179}
180
181addtask cve_check before do_build
182do_cve_check[depends] = "${CVE_CHECK_DB_FETCHER}:do_unpack"
183do_cve_check[nostamp] = "1"
184
185python cve_check_cleanup () {
186 """
187 Delete the file used to gather all the CVE information.
188 """
189 bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH"))
190}
191
192addhandler cve_check_cleanup
193cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
194
195python cve_check_write_rootfs_manifest () {
196 """
197 Create CVE manifest when building an image
198 """
199
200 import shutil
201 import json
202 from oe.rootfs import image_list_installed_packages
203 from oe.cve_check import cve_check_merge_jsons, update_symlinks
204
205 if d.getVar("CVE_CHECK_COPY_FILES") == "1":
206 deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
207 if os.path.exists(deploy_file_json):
208 bb.utils.remove(deploy_file_json)
209
210        # Create a list of relevant recipes
211 recipies = set()
212 for pkg in list(image_list_installed_packages(d)):
213 pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
214 'runtime-reverse', pkg)
215 pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
216 recipies.add(pkg_data["PN"])
217
218 bb.note("Writing rootfs CVE manifest")
219 deploy_dir = d.getVar("IMGDEPLOYDIR")
220 link_name = d.getVar("IMAGE_LINK_NAME")
221
222 json_data = {"version":"1", "package": []}
223 text_data = ""
224 enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1"
225
226 save_pn = d.getVar("PN")
227
228 for pkg in recipies:
229 # To be able to use the CVE_CHECK_RECIPE_FILE_JSON variable we have to evaluate
230 # it with the different PN names set each time.
231 d.setVar("PN", pkg)
232
233 if enable_json:
234 pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
235 if os.path.exists(pkgfilepath):
236 with open(pkgfilepath) as j:
237 data = json.load(j)
238 cve_check_merge_jsons(json_data, data)
239
240 d.setVar("PN", save_pn)
241
242 if enable_json:
243 manifest_name_suffix = d.getVar("CVE_CHECK_MANIFEST_JSON_SUFFIX")
244 manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
245
246 with open(manifest_name, "w") as f:
247 json.dump(json_data, f, indent=2)
248
249 if link_name:
250 link_path = os.path.join(deploy_dir, "%s.%s" % (link_name, manifest_name_suffix))
251 update_symlinks(manifest_name, link_path)
252
253 bb.plain("Image CVE JSON report stored in: %s" % manifest_name)
254}
255
256ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
257do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
258do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
259
260def cve_is_ignored(d, cve_data, cve):
261 if cve not in cve_data:
262 return False
263 if cve_data[cve]['abbrev-status'] == "Ignored":
264 return True
265 return False
266
267def cve_is_patched(d, cve_data, cve):
268 if cve not in cve_data:
269 return False
270 if cve_data[cve]['abbrev-status'] == "Patched":
271 return True
272 return False
273
274def cve_update(d, cve_data, cve, entry):
275 # If no entry, just add it
276 if cve not in cve_data:
277 cve_data[cve] = entry
278 return
279 # If we are updating, there might be change in the status
280 bb.debug(1, "Trying CVE entry update for %s from %s to %s" % (cve, cve_data[cve]['abbrev-status'], entry['abbrev-status']))
281 if cve_data[cve]['abbrev-status'] == "Unknown":
282 cve_data[cve] = entry
283 return
284 if cve_data[cve]['abbrev-status'] == entry['abbrev-status']:
285 return
286 # Update like in {'abbrev-status': 'Patched', 'status': 'version-not-in-range'} to {'abbrev-status': 'Unpatched', 'status': 'version-in-range'}
287 if entry['abbrev-status'] == "Unpatched" and cve_data[cve]['abbrev-status'] == "Patched":
288 if entry['status'] == "version-in-range" and cve_data[cve]['status'] == "version-not-in-range":
289 # New result from the scan, vulnerable
290 cve_data[cve] = entry
291 bb.debug(1, "CVE entry %s update from Patched to Unpatched from the scan result" % cve)
292 return
293 if entry['abbrev-status'] == "Patched" and cve_data[cve]['abbrev-status'] == "Unpatched":
294 if entry['status'] == "version-not-in-range" and cve_data[cve]['status'] == "version-in-range":
295 # Range does not match the scan, but we already have a vulnerable match, ignore
296 bb.debug(1, "CVE entry %s update from Patched to Unpatched from the scan result - not applying" % cve)
297 return
298 # If we have an "Ignored", it has a priority
299 if cve_data[cve]['abbrev-status'] == "Ignored":
300 bb.debug(1, "CVE %s not updating because Ignored" % cve)
301 return
302 bb.warn("Unhandled CVE entry update for %s from %s to %s" % (cve, cve_data[cve], entry))
303
304def check_cves(d, cve_data):
305 """
306 Connect to the NVD database and find unpatched cves.
307 """
308 from oe.cve_check import Version, convert_cve_version, decode_cve_status
309
310 pn = d.getVar("PN")
311 real_pv = d.getVar("PV")
312 suffix = d.getVar("CVE_VERSION_SUFFIX")
313
314 cves_status = []
315 cves_in_recipe = False
316    # CVE_PRODUCT can contain more than one product (e.g. curl/libcurl)
317 products = d.getVar("CVE_PRODUCT").split()
318 # If this has been unset then we're not scanning for CVEs here (for example, image recipes)
319 if not products:
320 return ([], [])
321 pv = d.getVar("CVE_VERSION").split("+git")[0]
322
323 # If the recipe has been skipped/ignored we return empty lists
324 if pn in d.getVar("CVE_CHECK_SKIP_RECIPE").split():
325 bb.note("Recipe has been skipped by cve-check")
326 return ([], [])
327
328 import sqlite3
329 db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
330 conn = sqlite3.connect(db_file, uri=True)
331
332 # For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
333 for product in products:
334 cves_in_product = False
335 if ":" in product:
336 vendor, product = product.split(":", 1)
337 else:
338 vendor = "%"
339
340 # Find all relevant CVE IDs.
341 cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
342 for cverow in cve_cursor:
343 cve = cverow[0]
344
345 # Write status once only for each product
346 if not cves_in_product:
347 cves_status.append([product, True])
348 cves_in_product = True
349 cves_in_recipe = True
350
351 if cve_is_ignored(d, cve_data, cve):
352 bb.note("%s-%s ignores %s" % (product, pv, cve))
353 continue
354 elif cve_is_patched(d, cve_data, cve):
355 bb.note("%s has been patched" % (cve))
356 continue
357
358 vulnerable = False
359 ignored = False
360
361 product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
362 for row in product_cursor:
363 (_, _, _, version_start, operator_start, version_end, operator_end) = row
364 #bb.debug(2, "Evaluating row " + str(row))
365 if cve_is_ignored(d, cve_data, cve):
366 ignored = True
367
368 version_start = convert_cve_version(version_start)
369 version_end = convert_cve_version(version_end)
370
371 if (operator_start == '=' and pv == version_start) or version_start == '-':
372 vulnerable = True
373 else:
374 if operator_start:
375 try:
376 vulnerable_start = (operator_start == '>=' and Version(pv,suffix) >= Version(version_start,suffix))
377 vulnerable_start |= (operator_start == '>' and Version(pv,suffix) > Version(version_start,suffix))
378 except:
379 bb.warn("%s: Failed to compare %s %s %s for %s" %
380 (product, pv, operator_start, version_start, cve))
381 vulnerable_start = False
382 else:
383 vulnerable_start = False
384
385 if operator_end:
386 try:
387 vulnerable_end = (operator_end == '<=' and Version(pv,suffix) <= Version(version_end,suffix) )
388 vulnerable_end |= (operator_end == '<' and Version(pv,suffix) < Version(version_end,suffix) )
389 except:
390 bb.warn("%s: Failed to compare %s %s %s for %s" %
391 (product, pv, operator_end, version_end, cve))
392 vulnerable_end = False
393 else:
394 vulnerable_end = False
395
396 if operator_start and operator_end:
397 vulnerable = vulnerable_start and vulnerable_end
398 else:
399 vulnerable = vulnerable_start or vulnerable_end
400
401 if vulnerable:
402 if ignored:
403 bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv))
404 cve_update(d, cve_data, cve, {"abbrev-status": "Ignored"})
405 else:
406 bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
407 cve_update(d, cve_data, cve, {"abbrev-status": "Unpatched", "status": "version-in-range"})
408 break
409 product_cursor.close()
410
411 if not vulnerable:
412 bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
413 cve_update(d, cve_data, cve, {"abbrev-status": "Patched", "status": "version-not-in-range"})
414 cve_cursor.close()
415
416 if not cves_in_product:
417 bb.note("No CVE records found for product %s, pn %s" % (product, pn))
418 cves_status.append([product, False])
419
420 conn.close()
421
422 if not cves_in_recipe:
423 bb.note("No CVE records for products in recipe %s" % (pn))
424
425 if d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1":
426 unpatched_cves = [cve for cve in cve_data if cve_data[cve]["abbrev-status"] == "Unpatched"]
427 if unpatched_cves:
428 bb.warn("Found unpatched CVE (%s)" % " ".join(unpatched_cves))
429
430 return (cve_data, cves_status)
431
432def get_cve_info(d, cve_data):
433 """
434 Get CVE information from the database.
435 """
436
437 import sqlite3
438
439 db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
440 conn = sqlite3.connect(db_file, uri=True)
441
442 for cve in cve_data:
443 cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
444 for row in cursor:
445            # The CVE itself has been added already
446 if row[0] not in cve_data:
447 bb.note("CVE record %s not present" % row[0])
448 continue
449 #cve_data[row[0]] = {}
450 cve_data[row[0]]["NVD-summary"] = row[1]
451 cve_data[row[0]]["NVD-scorev2"] = row[2]
452 cve_data[row[0]]["NVD-scorev3"] = row[3]
453 cve_data[row[0]]["NVD-scorev4"] = row[4]
454 cve_data[row[0]]["NVD-modified"] = row[5]
455 cve_data[row[0]]["NVD-vector"] = row[6]
456 cve_data[row[0]]["NVD-vectorString"] = row[7]
457 cursor.close()
458 conn.close()
459
460def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file):
461 """
462    Write CVE information in the JSON format: to WORKDIR and to
463    CVE_CHECK_DIR. If the CVE manifest is enabled, also write fragment
464 files that will be assembled at the end in cve_check_write_rootfs_manifest.
465 """
466
467 import json
468
469 write_string = json.dumps(output, indent=2)
470 with open(direct_file, "w") as f:
471 bb.note("Writing file %s with CVE information" % direct_file)
472 f.write(write_string)
473
474 if d.getVar("CVE_CHECK_COPY_FILES") == "1":
475 bb.utils.mkdirhier(os.path.dirname(deploy_file))
476 with open(deploy_file, "w") as f:
477 f.write(write_string)
478
479 if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
480 cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
481 index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
482 bb.utils.mkdirhier(cvelogpath)
483 fragment_file = os.path.basename(deploy_file)
484 fragment_path = os.path.join(cvelogpath, fragment_file)
485 with open(fragment_path, "w") as f:
486 f.write(write_string)
487 with open(index_path, "a+") as f:
488 f.write("%s\n" % fragment_path)
489
490def cve_write_data_json(d, cve_data, cve_status):
491 """
492 Prepare CVE data for the JSON format, then write it.
493 """
494
495 output = {"version":"1", "package": []}
496 nvd_link = "https://nvd.nist.gov/vuln/detail/"
497
498 fdir_name = d.getVar("FILE_DIRNAME")
499 layer = fdir_name.split("/")[-3]
500
501 include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
502 exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
503
504 report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
505
506 if exclude_layers and layer in exclude_layers:
507 return
508
509 if include_layers and layer not in include_layers:
510 return
511
512 product_data = []
513 for s in cve_status:
514 p = {"product": s[0], "cvesInRecord": "Yes"}
515 if s[1] == False:
516 p["cvesInRecord"] = "No"
517 product_data.append(p)
518
519 package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
520 package_data = {
521 "name" : d.getVar("PN"),
522 "layer" : layer,
523 "version" : package_version,
524 "products": product_data
525 }
526
527 cve_list = []
528
529 for cve in sorted(cve_data):
530 if not report_all and (cve_data[cve]["abbrev-status"] == "Patched" or cve_data[cve]["abbrev-status"] == "Ignored"):
531 continue
532 issue_link = "%s%s" % (nvd_link, cve)
533
534 cve_item = {
535 "id" : cve,
536 "status" : cve_data[cve]["abbrev-status"],
537 "link": issue_link,
538 }
539 if 'NVD-summary' in cve_data[cve]:
540 cve_item["summary"] = cve_data[cve]["NVD-summary"]
541 cve_item["scorev2"] = cve_data[cve]["NVD-scorev2"]
542 cve_item["scorev3"] = cve_data[cve]["NVD-scorev3"]
543 cve_item["scorev4"] = cve_data[cve]["NVD-scorev4"]
544 cve_item["modified"] = cve_data[cve]["NVD-modified"]
545 cve_item["vector"] = cve_data[cve]["NVD-vector"]
546 cve_item["vectorString"] = cve_data[cve]["NVD-vectorString"]
547 if 'status' in cve_data[cve]:
548 cve_item["detail"] = cve_data[cve]["status"]
549 if 'justification' in cve_data[cve]:
550 cve_item["description"] = cve_data[cve]["justification"]
551 if 'resource' in cve_data[cve]:
552 cve_item["patch-file"] = cve_data[cve]["resource"]
553 cve_list.append(cve_item)
554
555 package_data["issue"] = cve_list
556 output["package"].append(package_data)
557
558 direct_file = d.getVar("CVE_CHECK_LOG_JSON")
559 deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
560 manifest_file = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON")
561
562 cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file)
563
564def cve_write_data(d, cve_data, status):
565 """
566 Write CVE data in each enabled format.
567 """
568
569 if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
570 cve_write_data_json(d, cve_data, status)
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
deleted file mode 100644
index 2e0070486b..0000000000
--- a/meta/classes/devtool-source.bbclass
+++ /dev/null
@@ -1,192 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Development tool - source extraction helper class
8#
9# NOTE: this class is intended for use by devtool and should not be
10# inherited manually.
11#
12# Copyright (C) 2014-2017 Intel Corporation
13#
14# This program is free software; you can redistribute it and/or modify
15# it under the terms of the GNU General Public License version 2 as
16# published by the Free Software Foundation.
17#
18# This program is distributed in the hope that it will be useful,
19# but WITHOUT ANY WARRANTY; without even the implied warranty of
20# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21# GNU General Public License for more details.
22#
23# You should have received a copy of the GNU General Public License along
24# with this program; if not, write to the Free Software Foundation, Inc.,
25# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26
27
28DEVTOOL_TEMPDIR ?= ""
29
30python() {
31 tempdir = d.getVar('DEVTOOL_TEMPDIR')
32
33 if not tempdir:
34 bb.fatal('devtool-source class is for internal use by devtool only')
35
36 # Make a subdir so we guard against WORKDIR==S
37 workdir = os.path.join(tempdir, 'workdir')
38 d.setVar('WORKDIR', workdir)
39 if not d.getVar('S').startswith(workdir):
40 # Usually a shared workdir recipe (kernel, gcc)
41 # Try to set a reasonable default
42 if bb.data.inherits_class('kernel', d):
43 d.setVar('S', '${WORKDIR}/source')
44 else:
45 d.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S')))
46 if bb.data.inherits_class('kernel', d):
47 # We don't want to move the source to STAGING_KERNEL_DIR here
48 d.setVar('STAGING_KERNEL_DIR', '${S}')
49
50 d.setVar('STAMPS_DIR', os.path.join(tempdir, 'stamps'))
51 d.setVar('T', os.path.join(tempdir, 'temp'))
52
53 # Hook in pre/postfuncs
54 is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
55 if is_kernel_yocto:
56 unpacktask = 'do_kernel_checkout'
57 d.appendVarFlag('do_configure', 'postfuncs', ' devtool_post_configure')
58 else:
59 unpacktask = 'do_unpack'
60 d.appendVarFlag(unpacktask, 'postfuncs', ' devtool_post_unpack')
61 d.appendVarFlag('do_patch', 'postfuncs', ' devtool_post_patch')
62
63 # NOTE: in order for the patch stuff to be fully functional,
64 # PATCHTOOL and PATCH_COMMIT_FUNCTIONS need to be set; we can't
65 # do that here because we can't guarantee the order of the anonymous
66 # functions, so it gets done in the bbappend we create.
67}
68
69
70python devtool_post_unpack() {
71 import oe.recipeutils
72 import shutil
73 sys.path.insert(0, os.path.join(d.getVar('COREBASE'), 'scripts', 'lib'))
74 import scriptutils
75 from devtool import setup_git_repo
76
77 tempdir = d.getVar('DEVTOOL_TEMPDIR')
78 workdir = d.getVar('WORKDIR')
79 unpackdir = d.getVar('UNPACKDIR')
80 srcsubdir = d.getVar('S')
81
82 # Add locally copied files to gitignore as we add back to the metadata directly
83 local_files = oe.recipeutils.get_recipe_local_files(d)
84 srcabspath = os.path.abspath(srcsubdir)
85 local_files = [fname for fname in local_files if
86 os.path.exists(os.path.join(unpackdir, fname)) and
87 srcabspath == unpackdir]
88 if local_files:
89 with open(os.path.join(tempdir, '.gitignore'), 'a+') as f:
90            f.write('# Ignore local files, by default. Remove following lines '
91 'if you want to commit the directory to Git\n')
92 for fname in local_files:
93 f.write('%s\n' % fname)
94
95 if srcsubdir.startswith(unpackdir) and os.path.dirname(srcsubdir) != unpackdir:
96 # Handle if S is set to a subdirectory of the source
97 srcsubdir = os.path.normpath(os.path.join(unpackdir, os.path.relpath(srcsubdir, unpackdir).split(os.sep)[0]))
98
99 scriptutils.git_convert_standalone_clone(srcsubdir)
100
101 # Make sure that srcsubdir exists
102 bb.utils.mkdirhier(srcsubdir)
103 if not os.listdir(srcsubdir):
104 bb.warn("No source unpacked to S - either the %s recipe "
105 "doesn't use any source or the correct source "
106 "directory could not be determined" % d.getVar('PN'))
107
108 devbranch = d.getVar('DEVTOOL_DEVBRANCH')
109 setup_git_repo(srcsubdir, d.getVar('PV'), devbranch, d=d)
110
111 (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
112 initial_rev = stdout.rstrip()
113 with open(os.path.join(tempdir, 'initial_rev'), 'w') as f:
114 f.write(initial_rev)
115
116 with open(os.path.join(tempdir, 'srcsubdir'), 'w') as f:
117 f.write(srcsubdir)
118}
119
120python devtool_post_patch() {
121 import shutil
122 tempdir = d.getVar('DEVTOOL_TEMPDIR')
123 with open(os.path.join(tempdir, 'srcsubdir'), 'r') as f:
124 srcsubdir = f.read()
125 with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
126 initial_rev = f.read()
127
128 def rm_patches():
129 patches_dir = os.path.join(srcsubdir, 'patches')
130 if os.path.exists(patches_dir):
131 shutil.rmtree(patches_dir)
132 # Restore any "patches" directory that was actually part of the source tree
133 try:
134 bb.process.run('git checkout -- patches', cwd=srcsubdir)
135 except bb.process.ExecutionError:
136 pass
137
138 extra_overrides = d.getVar('DEVTOOL_EXTRA_OVERRIDES')
139 if extra_overrides:
140 extra_overrides = set(extra_overrides.split(':'))
141 devbranch = d.getVar('DEVTOOL_DEVBRANCH')
142 default_overrides = d.getVar('OVERRIDES').split(':')
143 no_overrides = []
144 # First, we may have some overrides that are referred to in the recipe set in
145 # our configuration, so we need to make a branch that excludes those
146 for override in default_overrides:
147 if override not in extra_overrides:
148 no_overrides.append(override)
149 if default_overrides != no_overrides:
150 # Some overrides are active in the current configuration, so
151 # we need to create a branch where none of the overrides are active
152 bb.process.run('git checkout %s -b devtool-no-overrides' % initial_rev, cwd=srcsubdir)
153 # Run do_patch function with the override applied
154 localdata = bb.data.createCopy(d)
155 localdata.setVar('OVERRIDES', ':'.join(no_overrides))
156 localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides))
157 bb.build.exec_func('do_patch', localdata)
158 rm_patches()
159 # Now we need to reconcile the dev branch with the no-overrides one
160 # (otherwise we'd likely be left with identical commits that have different hashes)
161 bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
162 bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
163 else:
164 bb.process.run('git checkout %s -b devtool-no-overrides' % devbranch, cwd=srcsubdir)
165
166 for override in extra_overrides:
167 localdata = bb.data.createCopy(d)
168 if override in default_overrides:
169 bb.process.run('git branch devtool-override-%s %s' % (override, devbranch), cwd=srcsubdir)
170 else:
171 # Reset back to the initial commit on a new branch
172 bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
173 # Run do_patch function with the override applied
174 localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override]))
175 localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override]))
176 bb.build.exec_func('do_patch', localdata)
177 rm_patches()
178 # Now we need to reconcile the new branch with the no-overrides one
179 # (otherwise we'd likely be left with identical commits that have different hashes)
180 bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
181 bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
182 bb.process.run('git tag -f --no-sign devtool-patched', cwd=srcsubdir)
183 if os.path.exists(os.path.join(srcsubdir, '.gitmodules')):
184 bb.process.run('git submodule foreach --recursive "git tag -f --no-sign devtool-patched"', cwd=srcsubdir)
185
186}
187
188python devtool_post_configure() {
189 import shutil
190 tempdir = d.getVar('DEVTOOL_TEMPDIR')
191 shutil.copy2(os.path.join(d.getVar('B'), '.config'), tempdir)
192}
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
deleted file mode 100644
index 8d9d7cda7d..0000000000
--- a/meta/classes/distrooverrides.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Turns certain DISTRO_FEATURES into overrides with the same
8# name plus a df- prefix. Ensures that these special
9# distro features remain set also for native and nativesdk
10# recipes, so that these overrides can also be used there.
11#
12# This makes it simpler to write .bbappends that only change the
13# task signatures of the recipe if the change is really enabled,
14# for example with:
15# do_install:append:df-my-feature () { ... }
16# where "my-feature" is a DISTRO_FEATURE.
17#
18# The class is meant to be used in a layer.conf or distro
19# .inc file with:
20# INHERIT += "distrooverrides"
21# DISTRO_FEATURES_OVERRIDES += "my-feature"
22#
23# Beware that this part of OVERRIDES changes during parsing, so usage
24# of these overrides should be limited to .bb and .bbappend files,
25# because then DISTRO_FEATURES is final.
26
27DISTRO_FEATURES_OVERRIDES ?= ""
28DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
29Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
30
31DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
32DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
33
34# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
35# signature because of this line, then the task dependency on
36# OVERRIDES itself should be fixed. Excluding these two variables
37# with DISTROOVERRIDES[vardepsexclude] would just work around the problem.
38DISTROOVERRIDES .= "${@ ''.join([':df-' + x for x in sorted(set(d.getVar('DISTRO_FEATURES_OVERRIDES').split()) & set((d.getVar('DISTRO_FEATURES') or '').split()))]) }"
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
deleted file mode 100644
index 527c99ab69..0000000000
--- a/meta/classes/externalsrc.bbclass
+++ /dev/null
@@ -1,280 +0,0 @@
1# Copyright (C) 2012 Linux Foundation
2# Author: Richard Purdie
3# Some code and influence taken from srctree.bbclass:
4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
5#
6# SPDX-License-Identifier: MIT
7#
8# externalsrc.bbclass enables use of an existing source tree, usually external to
9# the build system to build a piece of software rather than the usual fetch/unpack/patch
10# process.
11#
12# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
13# directory you want to use containing the sources e.g. from local.conf for a recipe
14# called "myrecipe" you would do:
15#
16# INHERIT += "externalsrc"
17# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
18#
19# In order to make this class work for both target and native versions (or with
20# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
21# directory under the work directory (split source and build directories). This is
22# the default, but the build directory can be set to the source directory if
23# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
24#
25# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
26#
27
28SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
29EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
30
31def find_git_dir(d, s_dir):
32 import subprocess
33 git_dir = None
34 try:
35 git_dir = os.path.join(s_dir,
36 subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
37 top_git_dir = os.path.join(d.getVar("TOPDIR"),
38 subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
39 if git_dir == top_git_dir:
40 git_dir = None
41 except subprocess.CalledProcessError:
42 pass
43 return git_dir
44
45python () {
46 externalsrc = d.getVar('EXTERNALSRC')
47 externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
48
49 if externalsrc and not externalsrc.startswith("/"):
50 bb.error("EXTERNALSRC must be an absolute path")
51 if externalsrcbuild and not externalsrcbuild.startswith("/"):
52 bb.error("EXTERNALSRC_BUILD must be an absolute path")
53
54 # If this is the base recipe and EXTERNALSRC is set for it or any of its
55 # derivatives, then enable BB_DONT_CACHE to force the recipe to always be
56 # re-parsed so that the file-checksums function for do_compile is run every
57 # time.
58 bpn = d.getVar('BPN')
59 classextend = (d.getVar('BBCLASSEXTEND') or '').split()
60 if bpn == d.getVar('PN') or not classextend:
61 if (externalsrc or
62 ('native' in classextend and
63 d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
64 ('nativesdk' in classextend and
65 d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
66 ('cross' in classextend and
67 d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
68 d.setVar('BB_DONT_CACHE', '1')
69
70 if externalsrc:
71 import oe.recipeutils
72 import oe.path
73
74 d.setVar('S', externalsrc)
75 if externalsrcbuild:
76 d.setVar('B', externalsrcbuild)
77 else:
78 d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
79
80 bb.fetch.get_hashvalue(d)
81 local_srcuri = []
82 fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
83 for url in fetch.urls:
84 url_data = fetch.ud[url]
85 parm = url_data.parm
86 if url_data.type in ['file', 'npmsw', 'crate'] or parm.get('type') in ['kmeta', 'git-dependency']:
87 local_srcuri.append(url)
88
89 d.setVar('SRC_URI', ' '.join(local_srcuri))
90
91 # sstate is never going to work for external source trees, disable it
92 d.setVar('SSTATE_SKIP_CREATION', '1')
93
94 if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
95 d.setVar('CONFIGUREOPT_DEPTRACK', '')
96
97 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
98
99 for task in tasks:
100 if os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
101            # Since configure will likely touch ${S}, take a lock so only one task has access at a time
102 d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
103
104 for v in d.keys():
105 cleandirs = d.getVarFlag(v, "cleandirs", False)
106 if cleandirs:
107 # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
108 cleandirs = oe.recipeutils.split_var_value(cleandirs)
109 setvalue = False
110 for cleandir in cleandirs[:]:
111 if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
112 cleandirs.remove(cleandir)
113 setvalue = True
114 if setvalue:
115 d.setVarFlag(v, 'cleandirs', ' '.join(cleandirs))
116
117 fetch_tasks = ['do_fetch', 'do_unpack']
118 # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
119 # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
120 d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
121 d.setVarFlag('do_populate_lic', 'deps', (d.getVarFlag('do_populate_lic', 'deps', False) or []) + ['do_unpack'])
122
123 for task in d.getVar("SRCTREECOVEREDTASKS").split():
124 if local_srcuri and task in fetch_tasks:
125 continue
126 bb.build.deltask(task, d)
127 if task == 'do_unpack':
128 # The reproducible build create_source_date_epoch_stamp function must
129 # be run after the source is available and before the
130 # do_deploy_source_date_epoch task. In the normal case, it's attached
131 # to do_unpack as a postfuncs, but since we removed do_unpack (above)
132 # we need to move the function elsewhere. The easiest thing to do is
133 # move it into the prefuncs of the do_deploy_source_date_epoch task.
134 # This is safe, as externalsrc runs with the source already unpacked.
135 d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
136
137 d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
138 d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
139
140 d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
141 d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
142
143 d.appendVarFlag('do_compile', 'prefuncs', ' fetcher_hashes_dummyfunc')
144 d.appendVarFlag('do_configure', 'prefuncs', ' fetcher_hashes_dummyfunc')
145
146 # We don't want the workdir to go away
147 d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
148
149 bb.build.addtask('do_buildclean',
150 'do_clean' if d.getVar('S') == d.getVar('B') else None,
151 None, d)
152
153 # If B=S the same builddir is used even for different architectures.
154 # Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
155 # change of do_configure task hash is correctly detected and stamps are
156 # invalidated if e.g. MACHINE changes.
157 if d.getVar('S') == d.getVar('B'):
158 configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
159 d.setVar('CONFIGURESTAMPFILE', configstamp)
160 d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
161 d.setVar('STAMPCLEAN', '${STAMPS_DIR}/work-shared/${PN}/*-*')
162}
163
164python externalsrc_configure_prefunc() {
165 s_dir = d.getVar('S')
166 # Create desired symlinks
167 symlinks = (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()
168 newlinks = []
169 for symlink in symlinks:
170 symsplit = symlink.split(':', 1)
171 lnkfile = os.path.join(s_dir, symsplit[0])
172 target = d.expand(symsplit[1])
173 if len(symsplit) > 1:
174 if os.path.islink(lnkfile):
175                # Link already exists; leave it alone if it already points to the right location
176 if os.readlink(lnkfile) == target:
177 continue
178 os.unlink(lnkfile)
179 elif os.path.exists(lnkfile):
180 # File/dir exists with same name as link, just leave it alone
181 continue
182 os.symlink(target, lnkfile)
183 newlinks.append(symsplit[0])
184 # Hide the symlinks from git
185 try:
186 git_dir = find_git_dir(d, s_dir)
187 if git_dir:
188 git_exclude_file = os.path.join(git_dir, 'info/exclude')
189 if os.path.exists(git_exclude_file):
190 with open(git_exclude_file, 'r+') as efile:
191 elines = efile.readlines()
192 for link in newlinks:
193 if link in elines or '/'+link in elines:
194 continue
195 efile.write('/' + link + '\n')
196 except IOError as ioe:
197 bb.note('Failed to hide EXTERNALSRC_SYMLINKS from git')
198}
199
200python externalsrc_compile_prefunc() {
201 # Make it obvious that this is happening, since forgetting about it could lead to much confusion
202 bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN'), d.getVar('EXTERNALSRC')))
203}
204
205do_buildclean[dirs] = "${S} ${B}"
206do_buildclean[nostamp] = "1"
207do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
208externalsrc_do_buildclean() {
209 if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
210 rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
211 if [ "${CLEANBROKEN}" != "1" ]; then
212 oe_runmake clean || die "make failed"
213 fi
214 else
215 bbnote "nothing to do - no makefile found"
216 fi
217}
218
219def srctree_hash_files(d, srcdir=None):
220 import shutil
221 import subprocess
222 import tempfile
223 import hashlib
224
225 s_dir = srcdir or d.getVar('EXTERNALSRC')
226 git_dir = find_git_dir(d, s_dir)
227
228 ret = " "
229 if git_dir is not None:
230 oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
231 with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
232 # Clone index
233 shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
234 # Update our custom index
235 env = os.environ.copy()
236 env['GIT_INDEX_FILE'] = tmp_index.name
237 subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
238 git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
239 if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
240 submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
241 for line in submodule_helper.splitlines():
242 module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
243 if os.path.isdir(module_dir):
244 proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
245 proc.communicate()
246 proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
247 stdout, _ = proc.communicate()
248 git_sha1 += stdout.decode("utf-8")
249 sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
250 with open(oe_hash_file, 'w') as fobj:
251 fobj.write(sha1)
252 ret = oe_hash_file + ':True'
253 else:
254 ret = s_dir + '/*:True'
255 return ret
256
257def srctree_configure_hash_files(d):
258 """
259 Get the list of files that should trigger do_configure to re-execute,
260 based on the value of CONFIGURE_FILES
261 """
262 import fnmatch
263
264 in_files = (d.getVar('CONFIGURE_FILES') or '').split()
265 out_items = []
266 search_files = []
267 for entry in in_files:
268 if entry.startswith('/'):
269 out_items.append('%s:%s' % (entry, os.path.exists(entry)))
270 else:
271 search_files.append(entry)
272 if search_files:
273 s_dir = d.getVar('EXTERNALSRC')
274 for root, _, files in os.walk(s_dir):
275 for p in search_files:
276 for f in fnmatch.filter(files, p):
277 out_items.append('%s:True' % os.path.join(root, f))
278 return ' '.join(out_items)
279
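# A hedged illustration: with a (hypothetical) setting such as the one below,
# do_configure would re-execute whenever a matching file anywhere in the
# external source tree changes:
# CONFIGURE_FILES = "configure.ac CMakeLists.txt"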
280EXPORT_FUNCTIONS do_buildclean
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
deleted file mode 100644
index c825c06df9..0000000000
--- a/meta/classes/extrausers.bbclass
+++ /dev/null
@@ -1,77 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This bbclass is used for image level user/group configuration.
8# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
9
10# Below is an example showing how to use this functionality.
11# IMAGE_CLASSES += "extrausers"
12# EXTRA_USERS_PARAMS = "\
13# useradd -p '' tester; \
14# groupadd developers; \
15# userdel nobody; \
16# groupdel -g video; \
17# groupmod -g 1020 developers; \
18# usermod -s /bin/sh tester; \
19# "
20
21inherit useradd_base
22
23PACKAGE_INSTALL:append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
24
25# Image level user / group settings
26ROOTFS_POSTPROCESS_COMMAND:append = " set_user_group"
27
28# Image level user / group settings
29set_user_group () {
30 user_group_settings="${EXTRA_USERS_PARAMS}"
31 export PSEUDO="${FAKEROOTENV} ${STAGING_DIR_NATIVE}${bindir}/pseudo"
32 setting=`echo $user_group_settings | cut -d ';' -f1`
33 remaining=`echo $user_group_settings | cut -d ';' -f2-`
34 while test "x$setting" != "x"; do
35 cmd=`echo $setting | cut -d ' ' -f1`
36 opts=`echo $setting | cut -d ' ' -f2-`
37    # Unlike useradd.bbclass, there's no file locking issue here, as
38    # these settings are applied serially, so we only retry once.
39 case $cmd in
40 useradd)
41 perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
42 ;;
43 groupadd)
44 perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
45 ;;
46 userdel)
47 perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
48 ;;
49 groupdel)
50 perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
51 ;;
52 usermod)
53 perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
54 ;;
55 passwd-expire)
56 perform_passwd_expire "${IMAGE_ROOTFS}" "$opts"
57 ;;
58 groupmod)
59 perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
60 ;;
61 *)
62 bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd"
63 ;;
64 esac
65 # Avoid infinite loop if the last parameter doesn't end with ';'
66 if [ "$setting" = "$remaining" ]; then
67 break
68 fi
69 # iterate to the next setting
70 setting=`echo $remaining | cut -d ';' -f1`
71 remaining=`echo $remaining | cut -d ';' -f2-`
72 done
73}
74
75USERADDEXTENSION ?= ""
76
77inherit ${USERADDEXTENSION}
diff --git a/meta/classes/go-vendor.bbclass b/meta/classes/go-vendor.bbclass
deleted file mode 100644
index 6ec6178add..0000000000
--- a/meta/classes/go-vendor.bbclass
+++ /dev/null
@@ -1,215 +0,0 @@
1#
2# Copyright 2023 (C) Weidmueller GmbH & Co KG
3# Author: Lukas Funke <lukas.funke@weidmueller.com>
4#
5# Handle Go vendor support for offline builds
6#
7# When importing Go modules, Go downloads the imported modules using
8# a network (proxy) connection ahead of the compile stage. This contradicts
9# the Yocto build concept of fetching every source ahead of build time
10# and supporting offline builds.
11#
12# To support offline builds, we use Go 'vendoring': module dependencies are
13# downloaded during the fetch phase and unpacked into the module's 'vendor'
14# folder. Additionally, a manifest file is generated for the 'vendor' folder.
15#
16
17inherit go-mod
18
19def go_src_uri(repo, version, path=None, subdir=None, \
20 vcs='git', replaces=None, pathmajor=None):
21
22 destsuffix = "git/src/import/vendor.fetch"
23 module_path = repo if not path else path
24
25 src_uri = "{}://{};name={}".format(vcs, repo, module_path.replace('/', '.'))
26 src_uri += ";destsuffix={}/{}@{}".format(destsuffix, repo, version)
27
28 if vcs == "git":
29 src_uri += ";nobranch=1;protocol=https"
30
31 src_uri += ";go_module_path={}".format(module_path)
32
33 if replaces:
34 src_uri += ";go_module_replacement={}".format(replaces)
35 if subdir:
36 src_uri += ";go_subdir={}".format(subdir)
37 if pathmajor:
38 src_uri += ";go_pathmajor={}".format(pathmajor)
39 src_uri += ";is_go_dependency=1"
40
41 return src_uri
42
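# A minimal sketch of how a recipe might use go_src_uri(); the module names
# and versions below are illustrative, not taken from any real go.mod:
#
# SRC_URI += "${@go_src_uri('github.com/sirupsen/logrus', 'v1.9.0')}"
# SRC_URI += "${@go_src_uri('golang.org/x/sys', 'v0.10.0')}"
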
43python do_vendor_unlink() {
44 go_import = d.getVar('GO_IMPORT')
45 source_dir = d.getVar('S')
46 linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
47
48 os.unlink(linkname)
49}
50
51addtask vendor_unlink before do_package after do_install
52
53python do_go_vendor() {
54 import shutil
55
56 src_uri = (d.getVar('SRC_URI') or "").split()
57
58 if not src_uri:
59 bb.fatal("SRC_URI is empty")
60
61 default_destsuffix = "git/src/import/vendor.fetch"
62 fetcher = bb.fetch2.Fetch(src_uri, d)
63 go_import = d.getVar('GO_IMPORT')
64 source_dir = d.getVar('S')
65
66 linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
67 vendor_dir = os.path.join(source_dir, *['src', 'import', 'vendor'])
68 import_dir = os.path.join(source_dir, *['src', 'import', 'vendor.fetch'])
69
70 if os.path.exists(vendor_dir):
71 # Nothing to do except re-establish link to actual vendor folder
72 if not os.path.exists(linkname):
73 oe.path.relsymlink(vendor_dir, linkname)
74 return
75
76 bb.utils.mkdirhier(vendor_dir)
77
78 modules = {}
79
80 for url in fetcher.urls:
81 srcuri = fetcher.ud[url].host + fetcher.ud[url].path
82
83 # Skip non Go module src uris
84 if not fetcher.ud[url].parm.get('is_go_dependency'):
85 continue
86
87 destsuffix = fetcher.ud[url].parm.get('destsuffix')
88 # We derive the module repo / version in the following manner (example):
89 #
90 # destsuffix = git/src/import/vendor.fetch/github.com/foo/bar@v1.2.3
91 # p = github.com/foo/bar@v1.2.3
92 # repo = github.com/foo/bar
93 # version = v1.2.3
94
95 p = destsuffix[len(default_destsuffix)+1:]
96 repo, version = p.split('@')
97
98 module_path = fetcher.ud[url].parm.get('go_module_path')
99
100 subdir = fetcher.ud[url].parm.get('go_subdir')
101 subdir = None if not subdir else subdir
102
103 pathMajor = fetcher.ud[url].parm.get('go_pathmajor')
104 pathMajor = None if not pathMajor else pathMajor.strip('/')
105
106 if not (repo, version) in modules:
107 modules[(repo, version)] = {
108 "repo_path": os.path.join(import_dir, p),
109 "module_path": module_path,
110 "subdir": subdir,
111 "pathMajor": pathMajor }
112
113 for module_key, module in modules.items():
114
115 # only take the version which is explicitly listed
116 # as a dependency in the go.mod
117 module_path = module['module_path']
118 rootdir = module['repo_path']
119 subdir = module['subdir']
120 pathMajor = module['pathMajor']
121
122 src = rootdir
123
124 if subdir:
125 src = os.path.join(rootdir, subdir)
126
127 # If the module is released at major version 2 or higher, the module
128 # path must end with a major version suffix like /v2.
129 # This may or may not be part of the subdirectory name
130 #
131 # https://go.dev/ref/mod#modules-overview
132 if pathMajor:
133 tmp = os.path.join(src, pathMajor)
134 # source directory including major version path may or may not exist
135 if os.path.exists(tmp):
136 src = tmp
137
138 dst = os.path.join(vendor_dir, module_path)
139
140 bb.debug(1, "cp %s --> %s" % (src, dst))
141 shutil.copytree(src, dst, symlinks=True, dirs_exist_ok=True, \
142 ignore=shutil.ignore_patterns(".git", \
143 "vendor", \
144 "*._test.go"))
145
146 # If the root directory has a LICENSE file but not the subdir
147 # we copy the root license to the sub module since the license
148 # applies to all modules in the repository
149 # see https://go.dev/ref/mod#vcs-license
150 if subdir:
151 rootdirLicense = os.path.join(rootdir, "LICENSE")
152 subdirLicense = os.path.join(src, "LICENSE")
153
154 if not os.path.exists(subdirLicense) and \
155 os.path.exists(rootdirLicense):
156 shutil.copy2(rootdirLicense, subdirLicense)
157
158 # Copy vendor manifest
159 modules_txt_src = os.path.join(d.getVar('UNPACKDIR'), "modules.txt")
160 bb.debug(1, "cp %s --> %s" % (modules_txt_src, vendor_dir))
161 shutil.copy2(modules_txt_src, vendor_dir)
162
163 # Clean up vendor dir
164 # We only require the modules in the modules_txt file
165 fetched_paths = set([os.path.relpath(x[0], vendor_dir) for x in os.walk(vendor_dir)])
166
167 # Remove toplevel dir
168 fetched_paths.remove('.')
169
170 vendored_paths = set()
171 replaced_paths = dict()
172 with open(modules_txt_src) as f:
173 for line in f:
174 if not line.startswith("#"):
175 line = line.strip()
176 vendored_paths.add(line)
177
178 # Add toplevel dirs into vendored dir, as we want to keep them
179 topdir = os.path.dirname(line)
180 while len(topdir):
181 if not topdir in vendored_paths:
182 vendored_paths.add(topdir)
183
184 topdir = os.path.dirname(topdir)
185 else:
186 replaced_module = line.split("=>")
187 if len(replaced_module) > 1:
188 # This module has been replaced, use a local path
189 # we parse the line that has a pattern "# module-name [module-version] => local-path"
190 actual_path = replaced_module[1].strip()
191 vendored_name = replaced_module[0].split()[1]
192 bb.debug(1, "added vendored name %s for actual path %s" % (vendored_name, actual_path))
193 replaced_paths[vendored_name] = actual_path
194
195 for path in fetched_paths:
196 if path not in vendored_paths:
197 realpath = os.path.join(vendor_dir, path)
198 if os.path.exists(realpath):
199 shutil.rmtree(realpath)
200
201 for vendored_name, replaced_path in replaced_paths.items():
202 symlink_target = os.path.join(source_dir, *['src', go_import, replaced_path])
203 symlink_name = os.path.join(vendor_dir, vendored_name)
204 relative_symlink_target = os.path.relpath(symlink_target, os.path.dirname(symlink_name))
205 bb.debug(1, "vendored name %s, symlink name %s" % (vendored_name, symlink_name))
206
207 os.makedirs(os.path.dirname(symlink_name), exist_ok=True)
208 os.symlink(relative_symlink_target, symlink_name)
209
210 # Create a symlink to the actual directory
211 relative_vendor_dir = os.path.relpath(vendor_dir, os.path.dirname(linkname))
212 os.symlink(relative_vendor_dir, linkname)
213}
214
215addtask go_vendor before do_patch after do_unpack
diff --git a/meta/classes/image-buildinfo.bbclass b/meta/classes/image-buildinfo.bbclass
deleted file mode 100644
index b83ce650ad..0000000000
--- a/meta/classes/image-buildinfo.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
1#
2# Writes build information to the target filesystem at /etc/buildinfo
3#
4# Copyright (C) 2014 Intel Corporation
5# Author: Alejandro Enedino Hernandez Samaniego <alejandro.hernandez@intel.com>
6#
7# SPDX-License-Identifier: MIT
8#
9# Usage: add INHERIT += "image-buildinfo" to your conf file
10#
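# For example, to record additional variables (the names below are only
# illustrative), local.conf could contain:
#   INHERIT += "image-buildinfo"
#   IMAGE_BUILDINFO_VARS:append = " IMAGE_BASENAME DATETIME"
#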
11
12# Desired variables to display
13IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
14
15# Desired location of the output file in the image.
16IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/buildinfo"
17SDK_BUILDINFO_FILE ??= "/buildinfo"
18
19# From buildhistory.bbclass
20def image_buildinfo_outputvars(vars, d):
21 vars = vars.split()
22 ret = ""
23 for var in vars:
24 value = d.getVar(var) or ""
25 if (d.getVarFlag(var, 'type') == "list"):
26 value = oe.utils.squashspaces(value)
27 ret += "%s = %s\n" % (var, value)
28 return ret.rstrip('\n')
29
30# Returns layer revisions along with their respective status
31def get_layer_revs(d):
32 revisions = oe.buildcfg.get_layer_revisions(d)
33 metadata_revs = ["%-17s = %s:%s%s" % (r[1], r[2], r[3], r[4]) for r in revisions]
34 return '\n'.join(metadata_revs)
35
36def buildinfo_target(d):
37 # Get context
38 if d.getVar('BB_WORKERCONTEXT') != '1':
39 return ""
40 # Single and list variables to be read
41 vars = (d.getVar("IMAGE_BUILDINFO_VARS") or "")
42 return image_buildinfo_outputvars(vars, d)
43
44python buildinfo() {
45 if not d.getVar('IMAGE_BUILDINFO_FILE'):
46 return
47 destfile = d.expand('${BUILDINFODEST}${IMAGE_BUILDINFO_FILE}')
48 bb.utils.mkdirhier(os.path.dirname(destfile))
49 with open(destfile, 'w') as build:
50 build.writelines((
51 '''-----------------------
52Build Configuration: |
53-----------------------
54''',
55 buildinfo_target(d),
56 '''
57-----------------------
58Layer Revisions: |
59-----------------------
60''',
61 get_layer_revs(d),
62 '''
63'''
64 ))
65}
66
67# Write build information to target filesystem
68python buildinfo_image () {
69 d.setVar("BUILDINFODEST", "${IMAGE_ROOTFS}")
70 bb.build.exec_func("buildinfo", d)
71}
72
73python buildinfo_sdk () {
74 d.setVar("BUILDINFODEST", "${SDK_OUTPUT}/${SDKPATH}")
75 d.setVar("IMAGE_BUILDINFO_FILE", d.getVar("SDK_BUILDINFO_FILE"))
76 bb.build.exec_func("buildinfo", d)
77}
78
79IMAGE_PREPROCESS_COMMAND += "buildinfo_image"
80POPULATE_SDK_PRE_TARGET_COMMAND += "buildinfo_sdk"
81
diff --git a/meta/classes/mcextend.bbclass b/meta/classes/mcextend.bbclass
deleted file mode 100644
index a489eeb3c7..0000000000
--- a/meta/classes/mcextend.bbclass
+++ /dev/null
@@ -1,22 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
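#
# A recipe opts in via BBCLASSEXTEND; the variant name below is illustrative:
#   BBCLASSEXTEND = "mcextend:musl"
# which parses an additional ${PN}-musl variant with MCNAME set to "musl".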
6
7python mcextend_virtclass_handler () {
8 cls = e.data.getVar("BBEXTENDCURR")
9 variant = e.data.getVar("BBEXTENDVARIANT")
10 if cls != "mcextend" or not variant:
11 return
12
13 override = ":virtclass-mcextend-" + variant
14
15 e.data.setVar("PN", e.data.getVar("PN", False) + "-" + variant)
16 e.data.setVar("MCNAME", variant)
17 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
18}
19
20addhandler mcextend_virtclass_handler
21mcextend_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
22
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
deleted file mode 100644
index 6842119b6b..0000000000
--- a/meta/classes/metadata_scm.bbclass
+++ /dev/null
@@ -1,10 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7METADATA_BRANCH := "${@oe.buildcfg.detect_branch(d)}"
8METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
9METADATA_REVISION := "${@oe.buildcfg.detect_revision(d)}"
10METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
deleted file mode 100644
index 15056dac4d..0000000000
--- a/meta/classes/multilib.bbclass
+++ /dev/null
@@ -1,252 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
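#
# Typical configuration (a sketch; the tune value is illustrative):
#   require conf/multilib.conf
#   MULTILIBS = "multilib:lib32"
#   DEFAULTTUNE:virtclass-multilib-lib32 = "x86"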
6
7python multilib_virtclass_handler () {
8 cls = d.getVar("BBEXTENDCURR")
9 variant = d.getVar("BBEXTENDVARIANT")
10 if cls != "multilib" or not variant:
11 return
12
13 localdata = bb.data.createCopy(d)
14 localdata.delVar('TMPDIR')
15 d.setVar('STAGING_KERNEL_DIR', localdata.getVar('STAGING_KERNEL_DIR'))
16
17 # There should only be one kernel in multilib configs
18 # We also skip multilib setup for module packages.
19 provides = (d.getVar("PROVIDES") or "").split()
20 non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
21 bpn = d.getVar("BPN")
22 if ("virtual/kernel" in provides
23 or bb.data.inherits_class('module-base', d)
24 or bb.data.inherits_class('kernel-fit-image', d)
25 or bpn in non_ml_recipes):
26 raise bb.parse.SkipRecipe("We shouldn't have multilib variants for %s" % bpn)
27
28 save_var_name = d.getVar("MULTILIB_SAVE_VARNAME") or ""
29 for name in save_var_name.split():
30 val = d.getVar(name)
31 if val:
32 d.setVar(name + "_MULTILIB_ORIGINAL", val)
33
34 # We almost don't need this, but dependencies on NON_MULTILIB_RECIPES don't work without it
35 d.setVar("SSTATE_ARCHS_TUNEPKG", "${@all_multilib_tune_values(d, 'TUNE_PKGARCH')}")
36
37 overrides = e.data.getVar("OVERRIDES", False)
38 pn = e.data.getVar("PN", False)
39 overrides = overrides.replace("pn-${PN}", "pn-${PN}:pn-" + pn)
40 d.setVar("OVERRIDES", overrides)
41
42 if bb.data.inherits_class('image', d):
43 d.setVar("MLPREFIX", variant + "-")
44 d.setVar("PN", variant + "-" + d.getVar("PN", False))
45 d.setVar('SDKTARGETSYSROOT', d.getVar('SDKTARGETSYSROOT'))
46 override = ":virtclass-multilib-" + variant
47 d.setVar("OVERRIDES", d.getVar("OVERRIDES", False) + override)
48 target_vendor = d.getVar("TARGET_VENDOR:" + "virtclass-multilib-" + variant, False)
49 if target_vendor:
50 d.setVar("TARGET_VENDOR", target_vendor)
51 return
52
53 if bb.data.inherits_class('cross-canadian', d):
54 # Multilib cross-canadian should use the same nativesdk sysroot without MLPREFIX
55 d.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
56 d.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
57 d.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
58 d.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
59 d.setVar("MLPREFIX", variant + "-")
60 override = ":virtclass-multilib-" + variant
61 d.setVar("OVERRIDES", d.getVar("OVERRIDES", False) + override)
62 return
63
64 if bb.data.inherits_class('native', d):
65 raise bb.parse.SkipRecipe("We can't extend native recipes")
66
67 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d):
68 raise bb.parse.SkipRecipe("We can't extend nativesdk recipes")
69
70 if (bb.data.inherits_class('allarch', d)
71 and not d.getVar('MULTILIB_VARIANTS')
72 and not bb.data.inherits_class('packagegroup', d)):
73 raise bb.parse.SkipRecipe("Don't extend allarch recipes which are not packagegroups")
74
75 # Expand this since this won't work correctly once we set a multilib into place
76 d.setVar("ALL_MULTILIB_PACKAGE_ARCHS", d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
77
78 override = ":virtclass-multilib-" + variant
79
80 skip_msg = d.getVarFlag('SKIP_RECIPE', d.getVar('PN'))
81 if skip_msg:
82 pn_new = variant + "-" + d.getVar('PN')
83 if not d.getVarFlag('SKIP_RECIPE', pn_new):
84 d.setVarFlag('SKIP_RECIPE', pn_new, skip_msg)
85
86 d.setVar("MLPREFIX", variant + "-")
87 d.setVar("PN", variant + "-" + d.getVar("PN", False))
88 d.setVar("OVERRIDES", d.getVar("OVERRIDES", False) + override)
89
90 # Expand INCOMPATIBLE_LICENSE_EXCEPTIONS with multilib prefix
91 pkgs = d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS")
92 if pkgs:
93 for pkg in pkgs.split():
94 pkgs += " " + variant + "-" + pkg
95 d.setVar("INCOMPATIBLE_LICENSE_EXCEPTIONS", pkgs)
96
97 # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
98 newtune = d.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + variant, False)
99 if newtune:
100 d.setVar("DEFAULTTUNE", newtune)
101}
102
103addhandler multilib_virtclass_handler
104multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
105
106python __anonymous () {
107 if bb.data.inherits_class('image', d):
108 # set rpm preferred file color for 32-bit multilib image
109 if d.getVar("SITEINFO_BITS") == "32":
110 d.setVar("RPM_PREFER_ELF_ARCH", "1")
111
112 variant = d.getVar("BBEXTENDVARIANT")
113 import oe.classextend
114
115 prefixes = (d.getVar("MULTILIB_VARIANTS") or "").split()
116 clsextend = oe.classextend.ClassExtender(variant, prefixes, d)
117 clsextend.set_filter("PACKAGE_INSTALL", deps=False)
118 clsextend.set_filter("LINGUAS_INSTALL", deps=False)
119 clsextend.set_filter("ROOTFS_RO_UNNEEDED", deps=False)
120 clsextend.set_filter("RDEPENDS", deps=True)
121 pinstall = d.getVar("LINGUAS_INSTALL") + " " + d.getVar("PACKAGE_INSTALL")
122 d.setVar("PACKAGE_INSTALL", pinstall)
123 d.setVar("LINGUAS_INSTALL", "")
124 # FIXME, we need to map this to something, not delete it!
125 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
126 bb.build.deltask('do_populate_sdk_ext', d)
127 return
128}
129
130python multilib_virtclass_handler_postkeyexp () {
131 cls = d.getVar("BBEXTENDCURR")
132 variant = d.getVar("BBEXTENDVARIANT")
133 if cls != "multilib" or not variant:
134 return
135
136 variant = d.getVar("BBEXTENDVARIANT")
137
138 import oe.classextend
139
140 if bb.data.inherits_class('image', d):
141 return
142
143 prefixes = (d.getVar("MULTILIB_VARIANTS") or "").split()
144 clsextend = oe.classextend.ClassExtender(variant, prefixes, d)
145
146 clsextend.set_filter("DEPENDS", deps=True)
147 clsextend.set_filter("PACKAGE_WRITE_DEPS", deps=False)
148
149 clsextend.set_filter("PROVIDES", deps=False)
150
151 if bb.data.inherits_class('cross-canadian', d):
152 return
153
154 clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
155
156 clsextend.map_packagevars()
157
158 clsextend.set_filter("INITSCRIPT_PACKAGES", deps=False)
159 clsextend.set_filter("USERADD_PACKAGES", deps=False)
160 clsextend.set_filter("SYSTEMD_PACKAGES", deps=False)
161 clsextend.set_filter("UPDATERCPN", deps=False)
162
163 reset_alternative_priority(d)
164}
165
166addhandler multilib_virtclass_handler_postkeyexp
167multilib_virtclass_handler_postkeyexp[eventmask] = "bb.event.RecipePostKeyExpansion"
168
169def reset_alternative_priority(d):
170 if not bb.data.inherits_class('update-alternatives', d):
171 return
172
173 # There might be multiple multilibs at the same time, e.g. lib32 and
174 # lib64; each of them should have a different priority.
175 multilib_variants = d.getVar('MULTILIB_VARIANTS')
176 bbextendvariant = d.getVar('BBEXTENDVARIANT')
177 reset_gap = multilib_variants.split().index(bbextendvariant) + 1
178
179 # ALTERNATIVE_PRIORITY = priority
180 alt_priority_recipe = d.getVar('ALTERNATIVE_PRIORITY')
181 # Reset ALTERNATIVE_PRIORITY when found
182 if alt_priority_recipe:
183 reset_priority = int(alt_priority_recipe) - reset_gap
184 bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY to %s' % (d.getVar('PN'), reset_priority))
185 d.setVar('ALTERNATIVE_PRIORITY', reset_priority)
186
187 handled_pkgs = []
188 for pkg in (d.getVar('PACKAGES') or "").split():
189 # ALTERNATIVE_PRIORITY_pkg = priority
190 alt_priority_pkg = d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg)
191 # Reset ALTERNATIVE_PRIORITY_pkg when found
192 if alt_priority_pkg:
193 reset_priority = int(alt_priority_pkg) - reset_gap
194 if not pkg in handled_pkgs:
195 handled_pkgs.append(pkg)
196 bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
197 d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
198
199 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
200 # ALTERNATIVE_PRIORITY_pkg[tool] = priority
201 alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
202 # ALTERNATIVE_PRIORITY[tool] = priority
203 alt_priority_name = d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
204
205 if alt_priority_pkg_name:
206 reset_priority = int(alt_priority_pkg_name) - reset_gap
207 bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s[%s] to %s' % (pkg, pkg, alt_name, reset_priority))
208 d.setVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, reset_priority)
209 elif alt_priority_name:
210 reset_priority = int(alt_priority_name) - reset_gap
211 bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
212 d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
213
214PACKAGEFUNCS:append = " do_package_qa_multilib"
215
216python do_package_qa_multilib() {
217
218 def check_mlprefix(pkg, var, mlprefix):
219 values = bb.utils.explode_deps(d.getVar('%s:%s' % (var, pkg)) or d.getVar(var) or "")
220 candidates = []
221 for i in values:
222 if i.startswith('virtual/'):
223 i = i[len('virtual/'):]
224
225 if (not (i.startswith(mlprefix) or i.startswith("kernel-") \
226 or ('cross-canadian' in i) or i.startswith("nativesdk-") \
227 or i.startswith("rtld") or i.startswith("/"))):
228 candidates.append(i)
229
230 if len(candidates) > 0:
231 msg = "%s package %s - suspicious values '%s' in %s" \
232 % (d.getVar('PN'), pkg, ' '.join(candidates), var)
233 oe.qa.handle_error("multilib", msg, d)
234
235 ml = d.getVar('MLPREFIX')
236 if not ml:
237 return
238
239 # exception for ${MLPREFIX}target-sdk-provides-dummy
240 if 'target-sdk-provides-dummy' in d.getVar('PN'):
241 return
242
243 packages = d.getVar('PACKAGES')
244 for pkg in packages.split():
245 check_mlprefix(pkg, 'RDEPENDS', ml)
246 check_mlprefix(pkg, 'RPROVIDES', ml)
247 check_mlprefix(pkg, 'RRECOMMENDS', ml)
248 check_mlprefix(pkg, 'RSUGGESTS', ml)
249 check_mlprefix(pkg, 'RREPLACES', ml)
250 check_mlprefix(pkg, 'RCONFLICTS', ml)
251 oe.qa.exit_if_errors(d)
252}
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
deleted file mode 100644
index d9372d9ed1..0000000000
--- a/meta/classes/multilib_global.bbclass
+++ /dev/null
@@ -1,233 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def preferred_ml_updates(d):
8 # If any of PREFERRED_PROVIDER, PREFERRED_RPROVIDER, REQUIRED_VERSION
9 # or PREFERRED_VERSION are set, we need to mirror these variables in
10# the multilib case:
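# For example (values illustrative): PREFERRED_VERSION_openssl = "3.0%"
# also needs a PREFERRED_VERSION_lib32-openssl counterpart for a "lib32"
# multilib; that is what this function generates.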
11 multilibs = d.getVar('MULTILIBS') or ""
12 if not multilibs:
13 return
14
15 prefixes = []
16 for ext in multilibs.split():
17 eext = ext.split(':')
18 if len(eext) > 1 and eext[0] == 'multilib':
19 prefixes.append(eext[1])
20
21 required_versions = []
22 preferred_versions = []
23 providers = []
24 rproviders = []
25 for v in d.keys():
26 if v.startswith("REQUIRED_VERSION_"):
27 required_versions.append(v)
28 if v.startswith("PREFERRED_VERSION_"):
29 preferred_versions.append(v)
30 if v.startswith("PREFERRED_PROVIDER_"):
31 providers.append(v)
32 if v.startswith("PREFERRED_RPROVIDER_"):
33 rproviders.append(v)
34
35 def sort_versions(versions, keyword):
36 version_str = "_".join([keyword, "VERSION", ""])
37 for v in versions:
38 val = d.getVar(v, False)
39 pkg = v.replace(version_str, "")
40 if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
41 continue
42 if '-cross-' in pkg and '${' in pkg:
43 for p in prefixes:
44 localdata = bb.data.createCopy(d)
45 override = ":virtclass-multilib-" + p
46 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
47 if "-canadian-" in pkg:
48 newtune = localdata.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + p, False)
49 if newtune:
50 localdata.setVar("DEFAULTTUNE", newtune)
51 newname = localdata.expand(v)
52 else:
53 newname = localdata.expand(v).replace(version_str, version_str + p + '-')
54 if newname != v:
55 newval = localdata.expand(val)
56 d.setVar(newname, newval)
57 # Avoid future variable key expansion
58 vexp = d.expand(v)
59 if v != vexp and d.getVar(v, False):
60 d.renameVar(v, vexp)
61 continue
62 for p in prefixes:
63 newname = version_str + p + "-" + pkg
64 if not d.getVar(newname, False):
65 d.setVar(newname, val)
66
67 sort_versions(required_versions, "REQUIRED")
68 sort_versions(preferred_versions, "PREFERRED")
69
70 for prov in providers:
71 val = d.getVar(prov, False)
72 pkg = prov.replace("PREFERRED_PROVIDER_", "")
73 if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
74 continue
75 if 'cross-canadian' in pkg:
76 for p in prefixes:
77 localdata = bb.data.createCopy(d)
78 override = ":virtclass-multilib-" + p
79 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
80 newname = localdata.expand(prov)
81 if newname != prov:
82 newval = localdata.expand(val)
83 d.setVar(newname, newval)
84 # Avoid future variable key expansion
85 provexp = d.expand(prov)
86 if prov != provexp and d.getVar(prov, False):
87 d.renameVar(prov, provexp)
88 continue
89 virt = ""
90 if pkg.startswith("virtual/"):
91 pkg = pkg.replace("virtual/", "")
92 virt = "virtual/"
93 for p in prefixes:
94 newval = None
95 if pkg != "kernel":
96 newval = p + "-" + val
97
98 # implement variable keys
99 localdata = bb.data.createCopy(d)
100 override = ":virtclass-multilib-" + p
101 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
102 newname = localdata.expand(prov)
103 if newname != prov and not d.getVar(newname, False):
104 d.setVar(newname, localdata.expand(newval))
105
106 # implement alternative multilib name
107 newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
108 if not d.getVar(newname, False) and newval != None:
109 d.setVar(newname, localdata.expand(newval))
110 # Avoid future variable key expansion
111 provexp = d.expand(prov)
112 if prov != provexp and d.getVar(prov, False):
113 d.renameVar(prov, provexp)
114
115 for prov in rproviders:
116 val = d.getVar(prov, False)
117 pkg = prov.replace("PREFERRED_RPROVIDER_", "")
118 for p in prefixes:
119 newval = p + "-" + val
120
121 # implement variable keys
122 localdata = bb.data.createCopy(d)
123 override = ":virtclass-multilib-" + p
124 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
125 newname = localdata.expand(prov)
126 if newname != prov and not d.getVar(newname, False):
127 d.setVar(newname, localdata.expand(newval))
128
129 # implement alternative multilib name
130 newname = localdata.expand("PREFERRED_RPROVIDER_" + p + "-" + pkg)
131 if not d.getVar(newname, False) and newval != None:
132 d.setVar(newname, localdata.expand(newval))
133 # Avoid future variable key expansion
134 provexp = d.expand(prov)
135 if prov != provexp and d.getVar(prov, False):
136 d.renameVar(prov, provexp)
137
138 def translate_provide(prefix, prov):
139 # Really need to know if kernel modules class is inherited somehow
140 if prov == "lttng-modules":
141 return prov
142 if not prov.startswith("virtual/"):
143 return prefix + "-" + prov
144 if prov == "virtual/kernel":
145 return prov
146 prov = prov.replace("virtual/", "")
147 return "virtual/" + prefix + "-" + prov
148
149 mp = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
150 extramp = []
151 for p in mp:
152 if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
153 continue
154 for pref in prefixes:
155 extramp.append(translate_provide(pref, p))
156 d.setVar("BB_MULTI_PROVIDER_ALLOWED", " ".join(mp + extramp))
157
158 virtprovs = d.getVar("BB_RECIPE_VIRTUAL_PROVIDERS").split()
159 for p in virtprovs.copy():
160 for pref in prefixes:
161 virtprovs.append(translate_provide(pref, p))
162 d.setVar("BB_RECIPE_VIRTUAL_PROVIDERS", " ".join(virtprovs))
163
164 abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
165 extras = []
166 for p in prefixes:
167 for a in abisafe:
168 extras.append(p + "-" + a)
169 d.appendVar("SIGGEN_EXCLUDERECIPES_ABISAFE", " " + " ".join(extras))
170
171 siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
172 extras = []
173 for p in prefixes:
174 for a in siggen_exclude:
175 a1, a2 = a.split("->")
176 extras.append(translate_provide(p, a1) + "->" + translate_provide(p, a2))
177 d.appendVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", " " + " ".join(extras))
178
179python multilib_virtclass_handler_vendor () {
180 for v in d.getVar("MULTILIB_VARIANTS").split():
181 if d.getVar("TARGET_VENDOR:virtclass-multilib-" + v, False) is None:
182 d.setVar("TARGET_VENDOR:virtclass-multilib-" + v, d.getVar("TARGET_VENDOR", False) + "ml" + v)
183 preferred_ml_updates(d)
184}
185addhandler multilib_virtclass_handler_vendor
186multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed"
187
188python multilib_virtclass_handler_global () {
189 variant = d.getVar("BBEXTENDVARIANT")
190 if variant:
191 return
192
193 if bb.data.inherits_class('native', d):
194 return
195
196 non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
197
198 if bb.data.inherits_class('kernel', d) or \
199 bb.data.inherits_class('module-base', d) or \
200 d.getVar('BPN') in non_ml_recipes:
201
202 # We need to avoid expanding KERNEL_VERSION which we can do by deleting it
203 # from a copy of the datastore
204 localdata = bb.data.createCopy(d)
205 localdata.delVar("KERNEL_VERSION")
206 localdata.delVar("KERNEL_VERSION_PKG_NAME")
207
208 variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
209
210 # Process PROVIDES
211 origprovs = provs = localdata.getVar("PROVIDES") or ""
212 for variant in variants:
213 provs = provs + " " + oe.classextend.suffix_filter_deps(localdata.getVar("PROVIDES") or "", variant, variants)
214 d.setVar("PROVIDES", provs)
215
216 # Process RPROVIDES
217 origrprovs = rprovs = localdata.getVar("RPROVIDES") or ""
218 for variant in variants:
219 rprovs = rprovs + " " + oe.classextend.suffix_filter_deps(localdata.getVar("RPROVIDES") or "", variant, variants)
220 if rprovs.strip():
221 d.setVar("RPROVIDES", rprovs)
222
223 # Process RPROVIDES:${PN}...
224 for pkg in (d.getVar("PACKAGES") or "").split():
225 origrprovs = rprovs = localdata.getVar("RPROVIDES:%s" % pkg) or ""
226 for variant in variants:
227 rprovs = rprovs + " " + oe.classextend.suffix_filter_deps(localdata.getVar("RPROVIDES:%s" % pkg) or "", variant, variants)
228 rprovs = rprovs + " " + variant + "-" + pkg
229 d.setVar("RPROVIDES:%s" % pkg, rprovs)
230}
231
232addhandler multilib_virtclass_handler_global
233multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeTaskPreProcess"
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
deleted file mode 100644
index 458a25ecc3..0000000000
--- a/meta/classes/oelint.bbclass
+++ /dev/null
@@ -1,90 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
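#
# Usage sketch (in local.conf): INHERIT += "oelint"
# The checks then run via the added task, e.g. "bitbake -c lint <recipe>".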
6
7addtask lint before do_build
8do_lint[nostamp] = "1"
9python do_lint() {
10 pkgname = d.getVar("PN")
11
12 ##############################
13 # Test that DESCRIPTION exists
14 #
15 description = d.getVar("DESCRIPTION", False)
16 if description[1:10] == '{SUMMARY}':
17 bb.warn("%s: DESCRIPTION is not set" % pkgname)
18
19
20 ##############################
21 # Test that HOMEPAGE exists
22 #
23 homepage = d.getVar("HOMEPAGE", False)
24 if homepage == '':
25 bb.warn("%s: HOMEPAGE is not set" % pkgname)
26 elif not homepage.startswith("http://") and not homepage.startswith("https://"):
27 bb.warn("%s: HOMEPAGE doesn't start with http:// or https://" % pkgname)
28
29
30 ##############################
31 # Test for valid SECTION
32 #
33 section = d.getVar("SECTION", False)
34 if section == '':
35 bb.warn("%s: SECTION is not set" % pkgname)
36 elif not section.islower():
37 bb.warn("%s: SECTION should only use lower case" % pkgname)
38
39
40 ##############################
41 # Check that all patches have Signed-off-by and Upstream-Status
42 #
43 srcuri = d.getVar("SRC_URI", False).split()
44 fpaths = (d.getVar('FILESPATH') or '').split(':')
45
46 def findPatch(patchname):
47 for dir in fpaths:
48 patchpath = os.path.join(dir, patchname)
49 if os.path.exists(patchpath):
50 return patchpath
51
52 def findKey(path, key):
53 ret = True
54 f = open(path, mode='r')
55 line = f.readline()
56 while line:
57 if line.find(key) != -1:
58 ret = False
59 line = f.readline()
60 f.close()
61 return ret
62
63 def checkPN(pkgname, varname, str):
64 if str.find("{PN}") != -1:
65 bb.warn("%s: should use BPN instead of PN in %s" % (pkgname, varname))
66 if str.find("{P}") != -1:
67 bb.warn("%s: should use BP instead of P in %s" % (pkgname, varname))
68
69 length = len("file://")
70 for item in srcuri:
71 if item.startswith("file://"):
72 item = item[length:]
73 if item.endswith(".patch") or item.endswith(".diff"):
74 path = findPatch(item)
75 if path and findKey(path, "Signed-off-by"):
76 bb.warn("%s: %s doesn't have Signed-off-by" % (pkgname, item))
77 if path and findKey(path, "Upstream-Status"):
78 bb.warn("%s: %s doesn't have Upstream-Status" % (pkgname, item))
79
80
81 ##############################
82 # Check for ${PN} or ${P} usage in SRC_URI or S
83 # Should use ${BPN} or ${BP} instead to avoid breaking multilib
84 #
85 for s in srcuri:
86 if not s.startswith("file://"):
87 checkPN(pkgname, 'SRC_URI', s)
88
89 checkPN(pkgname, 'S', d.getVar('S', False))
90}
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
deleted file mode 100644
index 36c7f8e3f3..0000000000
--- a/meta/classes/own-mirrors.bbclass
+++ /dev/null
@@ -1,22 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
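#
# Usage sketch (the mirror URL is illustrative):
#   INHERIT += "own-mirrors"
#   SOURCE_MIRROR_URL = "http://example.com/my-source-mirror"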
6
7PREMIRRORS:prepend = " \
8cvs://.*/.* ${SOURCE_MIRROR_URL} \
9svn://.*/.* ${SOURCE_MIRROR_URL} \
10git://.*/.* ${SOURCE_MIRROR_URL} \
11gitsm://.*/.* ${SOURCE_MIRROR_URL} \
12hg://.*/.* ${SOURCE_MIRROR_URL} \
13bzr://.*/.* ${SOURCE_MIRROR_URL} \
14p4://.*/.* ${SOURCE_MIRROR_URL} \
15osc://.*/.* ${SOURCE_MIRROR_URL} \
16https?://.*/.* ${SOURCE_MIRROR_URL} \
17ftp://.*/.* ${SOURCE_MIRROR_URL} \
18npm://.*/?.* ${SOURCE_MIRROR_URL} \
19s3://.*/.* ${SOURCE_MIRROR_URL} \
20crate://.*/.* ${SOURCE_MIRROR_URL} \
21gs://.*/.* ${SOURCE_MIRROR_URL} \
22"
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
deleted file mode 100644
index e5098e3308..0000000000
--- a/meta/classes/prexport.bbclass
+++ /dev/null
@@ -1,65 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
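#
# This class is normally driven by the bitbake-prserv-tool export flow
# rather than inherited by hand.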
6
7PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
8PRSERV_DUMPOPT_PKGARCH = ""
9PRSERV_DUMPOPT_CHECKSUM = ""
10PRSERV_DUMPOPT_COL = "0"
11
12PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
13PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
14
15python prexport_handler () {
16 import bb.event
17 if not e.data or bb.data.inherits_class('native', e.data) or \
18 bb.data.inherits_class('crosssdk', e.data):
19 return
20
21 if isinstance(e, bb.event.RecipeParsed):
22 import oe.prservice
23 #get all PR values for the current PRAUTOINX
24 ver = e.data.getVar('PRSERV_DUMPOPT_VERSION')
25 ver = ver.replace('%','-')
26 retval = oe.prservice.prserv_dump_db(e.data)
27 if not retval:
28 bb.fatal("prexport_handler: export failed!")
29 (metainfo, datainfo) = retval
30 if not datainfo:
31 bb.note("prexport_handler: No AUTOPR values found for %s" % ver)
32 return
33 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
34 if 'AUTOINC' in ver:
35 import re
36 srcpv = bb.fetch2.get_srcrev(e.data)
37 base_ver = "AUTOINC-%s" % ver[:ver.find(srcpv)]
38 e.data.setVar('PRSERV_DUMPOPT_VERSION', base_ver)
39 retval = oe.prservice.prserv_dump_db(e.data)
40 if not retval:
41 bb.fatal("prexport_handler: export failed!")
42 (metainfo, datainfo) = retval
43 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
44 elif isinstance(e, bb.event.ParseStarted):
45 import bb.utils
46 import oe.prservice
47 oe.prservice.prserv_check_avail(e.data)
48 #remove dumpfile
49 bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE'))
50 elif isinstance(e, bb.event.ParseCompleted):
51 import oe.prservice
52 #dump meta info of tables
53 d = e.data.createCopy()
54 d.setVar('PRSERV_DUMPOPT_COL', "1")
55 retval = oe.prservice.prserv_dump_db(d)
56 if not retval:
57 bb.error("prexport_handler: export failed!")
58 return
59 (metainfo, datainfo) = retval
60 oe.prservice.prserv_export_tofile(d, metainfo, None, True)
61
62}
63
64addhandler prexport_handler
65prexport_handler[eventmask] = "bb.event.RecipeParsed bb.event.ParseStarted bb.event.ParseCompleted"
diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass
deleted file mode 100644
index 00924174c1..0000000000
--- a/meta/classes/primport.bbclass
+++ /dev/null
@@ -1,27 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python primport_handler () {
8 import bb.event
9 if not e.data:
10 return
11
12 if isinstance(e, bb.event.ParseCompleted):
13 import oe.prservice
14 #import all exported AUTOPR values
15 imported = oe.prservice.prserv_import_db(e.data)
16 if imported is None:
17 bb.fatal("import failed!")
18
19 for (version, pkgarch, checksum, value) in imported:
20 bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
21 elif isinstance(e, bb.event.ParseStarted):
22 import oe.prservice
23 oe.prservice.prserv_check_avail(e.data)
24}
25
26addhandler primport_handler
27primport_handler[eventmask] = "bb.event.ParseCompleted bb.event.ParseStarted"
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
deleted file mode 100644
index a5cc4315fb..0000000000
--- a/meta/classes/recipe_sanity.bbclass
+++ /dev/null
@@ -1,155 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
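#
# Usage sketch (in local.conf): INHERIT += "recipe_sanity"
# then run e.g. "bitbake -c recipe_sanity <recipe>" or, recursively,
# "bitbake -c recipe_sanity_all <recipe>".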
6
7def __note(msg, d):
8 bb.note("%s: recipe_sanity: %s" % (d.getVar("P"), msg))
9
10__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
11def bad_runtime_vars(cfgdata, d):
12 if bb.data.inherits_class("native", d) or \
13 bb.data.inherits_class("cross", d):
14 return
15
16 for var in d.getVar("__recipe_sanity_badruntimevars").split():
17 val = d.getVar(var, False)
18 if val and val != cfgdata.get(var):
19 __note("%s should be %s:${PN}" % (var, var), d)
20
21__recipe_sanity_reqvars = "DESCRIPTION"
22__recipe_sanity_reqdiffvars = ""
23def req_vars(cfgdata, d):
24 for var in d.getVar("__recipe_sanity_reqvars").split():
25 if not d.getVar(var, False):
26 __note("%s should be set" % var, d)
27
28 for var in d.getVar("__recipe_sanity_reqdiffvars").split():
29 val = d.getVar(var, False)
30 cfgval = cfgdata.get(var)
31
32 if not val:
33 __note("%s should be set" % var, d)
34 elif val == cfgval:
35 __note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
36
37def var_renames_overwrite(cfgdata, d):
38 renames = d.getVar("__recipe_sanity_renames", False)
39 if renames:
40 for (key, newkey, oldvalue, newvalue) in renames:
41 if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
42 __note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
43
44def incorrect_nonempty_PACKAGES(cfgdata, d):
45 if bb.data.inherits_class("native", d) or \
46 bb.data.inherits_class("cross", d):
47 if d.getVar("PACKAGES"):
48 return True
49
50def can_use_autotools_base(cfgdata, d):
51 cfg = d.getVar("do_configure")
52 if not bb.data.inherits_class("autotools", d):
53 return False
54
55 for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
56 if cfg.find(i) != -1:
57 return False
58
59 for clsfile in d.getVar("__inherit_cache", False):
60 (base, _) = os.path.splitext(os.path.basename(clsfile))
61 if cfg.find("%s_do_configure" % base) != -1:
62 __note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
63
64 return True
65
66def can_delete_FILESPATH(cfgdata, d):
67 expected = cfgdata.get("FILESPATH")
68 expectedpaths = d.expand(expected)
69 unexpanded = d.getVar("FILESPATH", False)
70 filespath = d.getVar("FILESPATH").split(":")
71 filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
72 for fp in filespath:
73 if not fp in expectedpaths:
74 # __note("Path %s in FILESPATH not in the expected paths %s" %
75 # (fp, expectedpaths), d)
76 return False
77 return expected != unexpanded
78
79def can_delete_others(p, cfgdata, d):
80 for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
81 "SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
82 #for k in cfgdata:
83 unexpanded = d.getVar(k, False)
84 cfgunexpanded = cfgdata.get(k)
85 if not cfgunexpanded:
86 continue
87
88 try:
89 expanded = d.getVar(k)
90 cfgexpanded = d.expand(cfgunexpanded)
91 except bb.fetch.ParameterError:
92 continue
93
94 if unexpanded != cfgunexpanded and \
95 cfgexpanded == expanded:
96 __note("candidate for removal of %s" % k, d)
97 bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
98 (p, cfgunexpanded, unexpanded, expanded))
99
100python do_recipe_sanity () {
101 # Build an explicit "PN PV PR" string for the sanity messages
102 p = "%s %s %s" % (d.getVar("PN"), d.getVar("PV"), d.getVar("PR"))
103
104 sanitychecks = [
105 (can_delete_FILESPATH, "candidate for removal of FILESPATH"),
106 #(can_use_autotools_base, "candidate for use of autotools_base"),
107 (incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
108 ]
109 cfgdata = d.getVar("__recipe_sanity_cfgdata", False)
110
111 for (func, msg) in sanitychecks:
112 if func(cfgdata, d):
113 __note(msg, d)
114
115 can_delete_others(p, cfgdata, d)
116 var_renames_overwrite(cfgdata, d)
117 req_vars(cfgdata, d)
118 bad_runtime_vars(cfgdata, d)
119}
120do_recipe_sanity[nostamp] = "1"
121addtask recipe_sanity
122
123do_recipe_sanity_all[nostamp] = "1"
124do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity_all do_recipe_sanity"
125do_recipe_sanity_all () {
126 :
127}
128addtask recipe_sanity_all after do_recipe_sanity
129
130python recipe_sanity_eh () {
131 d = e.data
132
133 cfgdata = {}
134 for k in d.keys():
135 if not isinstance(d.getVar(k, False), bb.data_smart.DataSmart):
136 cfgdata[k] = d.getVar(k, False)
137
138 d.setVar("__recipe_sanity_cfgdata", cfgdata)
139 #d.setVar("__recipe_sanity_cfgdata", d)
140
141 # Sick, very sick..
142 from bb.data_smart import DataSmart
143 old = DataSmart.renameVar
144 def myrename(self, key, newkey):
145 oldvalue = self.getVar(newkey, 0)
146 old(self, key, newkey)
147 newvalue = self.getVar(newkey, 0)
148 if oldvalue:
149 renames = self.getVar("__recipe_sanity_renames", 0) or set()
150 renames.add((key, newkey, oldvalue, newvalue))
151 self.setVar("__recipe_sanity_renames", renames)
152 DataSmart.renameVar = myrename
153}
154addhandler recipe_sanity_eh
155recipe_sanity_eh[eventmask] = "bb.event.ConfigParsed"
diff --git a/meta/classes/relative_symlinks.bbclass b/meta/classes/relative_symlinks.bbclass
deleted file mode 100644
index 9ee20e0d09..0000000000
--- a/meta/classes/relative_symlinks.bbclass
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
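#
# Rewrites absolute symlinks installed into ${D} as relative ones, via
# oe.path.replace_absolute_symlinks() below.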
6
7do_install[postfuncs] += "install_relative_symlinks"
8
9python install_relative_symlinks () {
10 oe.path.replace_absolute_symlinks(d.getVar('D'), d)
11}
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
deleted file mode 100644
index d0a623fb0a..0000000000
--- a/meta/classes/relocatable.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit chrpath
8
9SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess relocatable_native_pcfiles"
10
11python relocatable_binaries_preprocess() {
12 rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
13}
14
15relocatable_native_pcfiles() {
16 for dir in ${libdir}/pkgconfig ${datadir}/pkgconfig; do
17 files_template=${SYSROOT_DESTDIR}$dir/*.pc
18 # Expand to any files matching $files_template
19 files=$(echo $files_template)
20 # $files_template and $files will differ if any files were found
21 if [ "$files_template" != "$files" ]; then
22 rel=$(realpath -m --relative-to=$dir ${base_prefix})
23 sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" $files
24 fi
25 done
26}
diff --git a/meta/classes/remove-libtool.bbclass b/meta/classes/remove-libtool.bbclass
deleted file mode 100644
index 8e987388c8..0000000000
--- a/meta/classes/remove-libtool.bbclass
+++ /dev/null
@@ -1,17 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class removes libtool .la files after do_install
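# To keep the .la files for a particular recipe, set REMOVE_LIBTOOL_LA = "0".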
8
9REMOVE_LIBTOOL_LA ?= "1"
10
11remove_libtool_la() {
12 if [ "${REMOVE_LIBTOOL_LA}" != "0" ]; then
13 find "${D}" -ignore_readdir_race -name "*.la" -delete
14 fi
15}
16
17do_install[postfuncs] += "remove_libtool_la"
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
deleted file mode 100644
index 2b880c8b0c..0000000000
--- a/meta/classes/report-error.bbclass
+++ /dev/null
@@ -1,159 +0,0 @@
1#
2# Collects debug information in order to create error report files.
3#
4# Copyright (C) 2013 Intel Corporation
5# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com>
6#
7# SPDX-License-Identifier: MIT
8#
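# Usage sketch (in local.conf): INHERIT += "report-error"
#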
9
10ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
11
12def errorreport_getdata(e):
13 import codecs
14 logpath = e.data.getVar('ERR_REPORT_DIR')
15 datafile = os.path.join(logpath, "error-report.txt")
16 with codecs.open(datafile, 'r', 'utf-8') as f:
17 data = f.read()
18 return data
19
20def errorreport_savedata(e, newdata, file):
21 import json
22 import codecs
23 logpath = e.data.getVar('ERR_REPORT_DIR')
24 datafile = os.path.join(logpath, file)
25 with codecs.open(datafile, 'w', 'utf-8') as f:
26 json.dump(newdata, f, indent=4, sort_keys=True)
27 return datafile
28
29def get_conf_data(e, filename):
30 builddir = e.data.getVar('TOPDIR')
31 filepath = os.path.join(builddir, "conf", filename)
32 jsonstring = ""
33 if os.path.exists(filepath):
34 with open(filepath, 'r') as f:
35 for line in f.readlines():
36 if line.startswith("#") or len(line.strip()) == 0:
37 continue
38 else:
39 jsonstring = jsonstring + line
40 return jsonstring
41
42def get_common_data(e):
43 data = {}
44 data['machine'] = e.data.getVar("MACHINE")
45 data['build_sys'] = e.data.getVar("BUILD_SYS")
46 data['distro'] = e.data.getVar("DISTRO")
47 data['target_sys'] = e.data.getVar("TARGET_SYS")
48 data['branch_commit'] = str(oe.buildcfg.detect_branch(e.data)) + ": " + str(oe.buildcfg.detect_revision(e.data))
49 data['bitbake_version'] = e.data.getVar("BB_VERSION")
50 data['layer_version'] = get_layers_branch_rev(e.data)
51 data['local_conf'] = get_conf_data(e, 'local.conf')
52 data['auto_conf'] = get_conf_data(e, 'auto.conf')
53 return data
54
55python errorreport_handler () {
56 import json
57 import codecs
58
59 def nativelsb():
60 nativelsbstr = e.data.getVar("NATIVELSBSTRING")
61 # provide a bit more host info in case of uninative build
62 if e.data.getVar('UNINATIVE_URL') != 'unset':
63 return '/'.join([nativelsbstr, lsb_distro_identifier(e.data)])
64 return nativelsbstr
65
66 logpath = e.data.getVar('ERR_REPORT_DIR')
67 datafile = os.path.join(logpath, "error-report.txt")
68
69 if isinstance(e, bb.event.BuildStarted):
70 bb.utils.mkdirhier(logpath)
71 data = {}
72 data = get_common_data(e)
73 data['nativelsb'] = nativelsb()
74 data['failures'] = []
75 data['component'] = " ".join(e.getPkgs())
76 lock = bb.utils.lockfile(datafile + '.lock')
77 errorreport_savedata(e, data, "error-report.txt")
78 bb.utils.unlockfile(lock)
79
80 elif isinstance(e, bb.build.TaskFailed):
81 task = e.task
82 taskdata={}
83 log = e.data.getVar('BB_LOGFILE')
84 taskdata['recipe'] = e.data.expand("${PN}")
85 taskdata['package'] = e.data.expand("${PF}")
86 taskdata['task'] = task
87 if log:
88 try:
89 with codecs.open(log, encoding='utf-8') as logFile:
90 logdata = logFile.read()
91 # Replace host-specific paths so the logs are cleaner
92 for d in ("TOPDIR", "TMPDIR"):
93 s = e.data.getVar(d)
94 if s:
95 logdata = logdata.replace(s, d)
96 except Exception:
97 logdata = "Unable to read log file"
98 else:
99 logdata = "No Log"
100
101 # server will refuse failures longer than param specified in project.settings.py
102 # MAX_UPLOAD_SIZE = "5242880"
103 # use lower value, because 650 chars can be spent in task, package, version
104 max_logdata_size = 5242000
105 # upload last max_logdata_size characters
106 if len(logdata) > max_logdata_size:
107 logdata = "..." + logdata[-max_logdata_size:]
108 taskdata['log'] = logdata
109 lock = bb.utils.lockfile(datafile + '.lock')
110 jsondata = json.loads(errorreport_getdata(e))
111 jsondata['failures'].append(taskdata)
112 errorreport_savedata(e, jsondata, "error-report.txt")
113 bb.utils.unlockfile(lock)
114
115 elif isinstance(e, bb.event.NoProvider):
116 bb.utils.mkdirhier(logpath)
117 data = {}
118 data = get_common_data(e)
119 data['nativelsb'] = nativelsb()
120 data['failures'] = []
121 data['component'] = str(e._item)
122 taskdata={}
123 taskdata['log'] = str(e)
124 taskdata['package'] = str(e._item)
125 taskdata['task'] = "Nothing provides " + "'" + str(e._item) + "'"
126 data['failures'].append(taskdata)
127 lock = bb.utils.lockfile(datafile + '.lock')
128 errorreport_savedata(e, data, "error-report.txt")
129 bb.utils.unlockfile(lock)
130
131 elif isinstance(e, bb.event.ParseError):
132 bb.utils.mkdirhier(logpath)
133 data = {}
134 data = get_common_data(e)
135 data['nativelsb'] = nativelsb()
136 data['failures'] = []
137 data['component'] = "parse"
138 taskdata={}
139 taskdata['log'] = str(e._msg)
140 taskdata['task'] = str(e._msg)
141 data['failures'].append(taskdata)
142 lock = bb.utils.lockfile(datafile + '.lock')
143 errorreport_savedata(e, data, "error-report.txt")
144 bb.utils.unlockfile(lock)
145
146 elif isinstance(e, bb.event.BuildCompleted):
147 lock = bb.utils.lockfile(datafile + '.lock')
148 jsondata = json.loads(errorreport_getdata(e))
149 bb.utils.unlockfile(lock)
150 failures = jsondata['failures']
151 if failures:
152 filename = "error_report_" + e.data.getVar("BUILDNAME")+".txt"
153 datafile = errorreport_savedata(e, jsondata, filename)
154 bb.note("The errors for this build are stored in %s\nYou can send the errors to a reports server by running:\n send-error-report %s [-s server]" % (datafile, datafile))
155 bb.note("The contents of these logs will be posted in public if you use the above command with the default server. Please ensure you remove any identifying or proprietary information when prompted before sending.")
156}
157
158addhandler errorreport_handler
159errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed bb.event.NoProvider bb.event.ParseError"
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
deleted file mode 100644
index 52ecfafb72..0000000000
--- a/meta/classes/rm_work.bbclass
+++ /dev/null
@@ -1,197 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Removes source after build
9#
10# To use it, add this line to conf/local.conf:
11#
12# INHERIT += "rm_work"
13#
14# To inhibit rm_work for some recipes, specify them in RM_WORK_EXCLUDE.
15# For example, in conf/local.conf:
16#
17# RM_WORK_EXCLUDE += "icu-native icu busybox"
18#
19# Recipes can also configure which entries in their ${WORKDIR}
20# are preserved besides temp, which already gets excluded by default
21# because it contains logs:
22# do_install:append () {
23# echo "bar" >${WORKDIR}/foo
24# }
25# RM_WORK_EXCLUDE_ITEMS += "foo"
26RM_WORK_EXCLUDE_ITEMS = "temp"
27
28# Use the completion scheduler by default when rm_work is active
29# to try and reduce disk usage
30BB_SCHEDULER ?= "completion"
31
32# Run the rm_work task in the idle scheduling class
33BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
34
35do_rm_work () {
36 # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
37 # Avoids race-condition accessing 'rm' when deleting WORKDIR folders at the end of this function
38 RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
39 if [ -z "${RM_BIN}" ]; then
40 bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
41 fi
42
43 # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
44 for p in ${RM_WORK_EXCLUDE}; do
45 if [ "$p" = "${PN}" ]; then
46 bbnote "rm_work: Skipping ${PN} since it is in RM_WORK_EXCLUDE"
47 exit 0
48 fi
49 done
50
51 # Need to add pseudo back or subsequent work in this workdir
52 # might fail since setscene may not rerun to recreate it
53 mkdir -p ${WORKDIR}/pseudo/
54
55 excludes='${RM_WORK_EXCLUDE_ITEMS}'
56
57 # Change normal stamps into setscene stamps as they better reflect the
58 # fact WORKDIR is now empty
59 # Also leave noexec stamps since setscene stamps don't cover them
60 STAMPDIR=`dirname ${STAMP}`
61 if test -d $STAMPDIR; then
62 cd $STAMPDIR
63 for i in `basename ${STAMP}`*
64 do
65 case $i in
66 *sigdata*|*sigbasedata*)
67 # Save/skip anything that looks like a signature data file.
68 ;;
69 *do_image_complete_setscene*|*do_image_qa_setscene*)
70 # Ensure we don't 'stack' setscene extensions to these stamps with the sections below
71 ;;
72 *do_image_complete*)
73 # Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
74 mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
75 ;;
76 *do_image_qa*)
77 # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
78 mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
79 ;;
80 *do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
81 ;;
82 *do_addto_recipe_sysroot*)
83 # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
84 excludes="$excludes recipe-sysroot-native"
85 ;;
86 *do_package|*do_package.*|*do_package_setscene.*)
87 # We remove do_package entirely, including any
88 # sstate version since otherwise we'd need to leave 'plaindirs' around
89 # such as 'packages' and 'packages-split' and these can be large. No end
90 # of chain tasks depend directly on do_package anymore.
91 "${RM_BIN}" -f -- $i;
92 ;;
93 *_setscene*)
94 # Skip stamps which are already setscene versions
95 ;;
96 *)
97 # For everything else: if suitable, promote the stamp to a setscene
98 # version, otherwise remove it
99 for j in ${SSTATETASKS} do_shared_workdir
100 do
101 case $i in
102 *$j|*$j.*)
103 mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
104 break
105 ;;
106 esac
107 done
108 "${RM_BIN}" -f -- $i
109 esac
110 done
111 fi
112
113 cd ${WORKDIR}
114 for dir in *
115 do
116 # Retain only logs and other files in temp; safely ignore
117 # failures when removing pseudo folders on NFS2/3 servers.
118 if [ $dir = 'pseudo' ]; then
119 "${RM_BIN}" -rf -- $dir 2> /dev/null || true
120 elif ! echo "$excludes" | grep -q -w "$dir"; then
121 "${RM_BIN}" -rf -- $dir
122 fi
123 done
124}
125do_rm_work[vardepsexclude] += "SSTATETASKS"
126
127do_rm_work_all () {
128 :
129}
130do_rm_work_all[recrdeptask] = "do_rm_work"
131do_rm_work_all[noexec] = "1"
132addtask rm_work_all before do_build
133
134do_populate_sdk[postfuncs] += "rm_work_populatesdk"
135rm_work_populatesdk () {
136 :
137}
138rm_work_populatesdk[cleandirs] = "${WORKDIR}/sdk"
139
140do_image_complete[postfuncs] += "rm_work_rootfs"
141rm_work_rootfs () {
142 :
143}
144rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
145
146# This task can be used instead of do_build to trigger building
147# without also invoking do_rm_work. It only exists when rm_work.bbclass
148# is active, otherwise do_build needs to be used.
149#
150# The intended usage is
151# ${@ d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build'}
152# in places that previously used just 'do_build'.
153RM_WORK_BUILD_WITHOUT = "do_build_without_rm_work"
154do_build_without_rm_work () {
155 :
156}
157do_build_without_rm_work[noexec] = "1"
158
159# We have to add these tasks already now, because all tasks are
160# meant to be defined before the RecipeTaskPreProcess event triggers.
161# The inject_rm_work event handler then merely changes task dependencies.
162addtask do_rm_work
163addtask do_build_without_rm_work
164addhandler inject_rm_work
165inject_rm_work[eventmask] = "bb.event.RecipeTaskPreProcess"
166python inject_rm_work() {
167 if bb.data.inherits_class('kernel', d):
168 d.appendVar("RM_WORK_EXCLUDE", ' ' + d.getVar("PN"))
169 # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
170 excludes = (d.getVar("RM_WORK_EXCLUDE") or "").split()
171 pn = d.getVar("PN")
172
173 # Determine what do_build depends upon, without including do_build
174 # itself or our own special do_rm_work_all.
175 deps = sorted((set(bb.build.preceedtask('do_build', True, d))).difference(('do_build', 'do_rm_work_all')) or "")
176
177    # deps can be empty if do_build doesn't exist, e.g. *-initial recipes
178 if not deps:
179 deps = ["do_populate_sysroot", "do_populate_lic"]
180
181 if pn in excludes:
182 d.delVarFlag('rm_work_rootfs', 'cleandirs')
183 d.delVarFlag('rm_work_populatesdk', 'cleandirs')
184 else:
185 # Inject do_rm_work into the tasks of the current recipe such that do_build
186 # depends on it and that it runs after all other tasks that block do_build,
187 # i.e. after all work on the current recipe is done. The reason for taking
188 # this approach instead of making do_rm_work depend on do_build is that
189 # do_build inherits additional runtime dependencies on
190 # other recipes and thus will typically run much later than completion of
191 # work in the recipe itself.
192 # In practice, addtask() here merely updates the dependencies.
193 bb.build.addtask('do_rm_work', 'do_rm_work_all do_build', ' '.join(deps), d)
194
195 # Always update do_build_without_rm_work dependencies.
196 bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
197}
diff --git a/meta/classes/rm_work_and_downloads.bbclass b/meta/classes/rm_work_and_downloads.bbclass
deleted file mode 100644
index 2695a3807f..0000000000
--- a/meta/classes/rm_work_and_downloads.bbclass
+++ /dev/null
@@ -1,32 +0,0 @@
1# Author: Patrick Ohly <patrick.ohly@intel.com>
2# Copyright: Copyright (C) 2015 Intel Corporation
3#
4# SPDX-License-Identifier: MIT
5
6# This class is used like rm_work:
7# INHERIT += "rm_work_and_downloads"
8#
9# In addition to removing local build directories of a recipe, it also
10# removes the downloaded source. This is achieved by making the DL_DIR
11# recipe-specific. While reducing disk usage, it increases network usage (for
12# example, compiling the same source for target and host implies downloading
13# the source twice).
14#
15# Because the "do_fetch" task does not get re-run after removing the downloaded
16# sources, this class is also not suitable for incremental builds.
17#
18# Where it works well is in well-connected build environments with limited
19# disk space (like TravisCI).
20
21inherit rm_work
22
23# This would ensure that the existing do_rm_work() removes the downloads,
24# but does not work because some recipes have a circular dependency between
25# WORKDIR and DL_DIR (via ${SRCPV}?).
26# DL_DIR = "${WORKDIR}/downloads"
27
28# Instead go up one level and remove the downloads directory ourselves.
29DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
30do_rm_work:append () {
31 rm -rf ${DL_DIR}
32}
diff --git a/meta/classes/sign_ipk.bbclass b/meta/classes/sign_ipk.bbclass
deleted file mode 100644
index 51c24b38b2..0000000000
--- a/meta/classes/sign_ipk.bbclass
+++ /dev/null
@@ -1,58 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class for generating signed IPK packages.
8#
9# Configuration variables used by this class:
10# IPK_GPG_PASSPHRASE_FILE
11# Path to a file containing the passphrase of the signing key.
12# IPK_GPG_NAME
13# Name of the key to sign with.
14# IPK_GPG_BACKEND
15# Optional variable for specifying the backend to use for signing.
16# Currently the only available option is 'local', i.e. local signing
17# on the build host.
18# IPK_GPG_SIGNATURE_TYPE
19#   Optional variable for specifying the type of gpg signatures; can be:
20#   1. ASCII armored (ASC), default if not set
21# 2. Binary (BIN)
22# GPG_BIN
23# Optional variable for specifying the gpg binary/wrapper to use for
24# signing.
25# GPG_PATH
26# Optional variable for specifying the gnupg "home" directory:
27#   Optional variable for specifying the gnupg "home" directory.
28
29inherit sanity
30
31IPK_SIGN_PACKAGES = '1'
32IPK_GPG_BACKEND ?= 'local'
33IPK_GPG_SIGNATURE_TYPE ?= 'ASC'
34
35python () {
36 # Check configuration
37 for var in ('IPK_GPG_NAME', 'IPK_GPG_PASSPHRASE_FILE'):
38 if not d.getVar(var):
39 raise_sanity_error("You need to define %s in the config" % var, d)
40
41 sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE")
42 if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
43        raise_sanity_error("Bad value for IPK_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype, d)
44}
45
46def sign_ipk(d, ipk_to_sign):
47 from oe.gpg_sign import get_signer
48
49 bb.debug(1, 'Signing ipk: %s' % ipk_to_sign)
50
51 signer = get_signer(d, d.getVar('IPK_GPG_BACKEND'))
52 sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE')
53 is_ascii_sig = (sig_type.upper() != "BIN")
54
55 signer.detach_sign(ipk_to_sign,
56 d.getVar('IPK_GPG_NAME'),
57 d.getVar('IPK_GPG_PASSPHRASE_FILE'),
58 armor=is_ascii_sig)
diff --git a/meta/classes/sign_package_feed.bbclass b/meta/classes/sign_package_feed.bbclass
deleted file mode 100644
index e9d664750c..0000000000
--- a/meta/classes/sign_package_feed.bbclass
+++ /dev/null
@@ -1,53 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class for signing package feeds
8#
9# Related configuration variables that will be used after this class is
10# inherited:
11# PACKAGE_FEED_PASSPHRASE_FILE
12# Path to a file containing the passphrase of the signing key.
13# PACKAGE_FEED_GPG_NAME
14# Name of the key to sign with. May be key id or key name.
15# PACKAGE_FEED_GPG_BACKEND
16# Optional variable for specifying the backend to use for signing.
17# Currently the only available option is 'local', i.e. local signing
18# on the build host.
19# PACKAGE_FEED_GPG_SIGNATURE_TYPE
20#   Optional variable for specifying the type of gpg signature; can be:
21#   1. ASCII armored (ASC), default if not set
22# 2. Binary (BIN)
23# This variable is only available for IPK feeds. It is ignored on
24# other packaging backends.
25# GPG_BIN
26# Optional variable for specifying the gpg binary/wrapper to use for
27# signing.
28# GPG_PATH
29#   Optional variable for specifying the gnupg "home" directory.
30#
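# A minimal usage sketch (the key name and passphrase path are hypothetical),
# typically set in local.conf:
#   INHERIT += "sign_package_feed"
#   PACKAGE_FEED_GPG_NAME = "feed-signing-key"
#   PACKAGE_FEED_GPG_PASSPHRASE_FILE = "/path/to/passphrase"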
31inherit sanity
32
33PACKAGE_FEED_SIGN = '1'
34PACKAGE_FEED_GPG_BACKEND ?= 'local'
35PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
36PACKAGEINDEXDEPS += "gnupg-native:do_populate_sysroot"
37
38# Make the feed signing key present in the rootfs
39FEATURE_PACKAGES_package-management:append = " signing-keys-packagefeed"
40
41python () {
42 # Check sanity of configuration
43 for var in ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE'):
44 if not d.getVar(var):
45 raise_sanity_error("You need to define %s in the config" % var, d)
46
47 sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE")
48 if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
49        raise_sanity_error("Bad value for PACKAGE_FEED_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype, d)
50}
51
52do_package_index[depends] += "signing-keys:do_deploy"
53do_rootfs[depends] += "signing-keys:do_populate_sysroot gnupg-native:do_populate_sysroot"
diff --git a/meta/classes/sign_rpm.bbclass b/meta/classes/sign_rpm.bbclass
deleted file mode 100644
index b5b21b0db1..0000000000
--- a/meta/classes/sign_rpm.bbclass
+++ /dev/null
@@ -1,78 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class for generating signed RPM packages.
8#
9# Configuration variables used by this class:
10# RPM_GPG_PASSPHRASE
11# The passphrase of the signing key.
12# RPM_GPG_NAME
13# Name of the key to sign with. May be key id or key name.
14# RPM_GPG_BACKEND
15# Optional variable for specifying the backend to use for signing.
16# Currently the only available option is 'local', i.e. local signing
17# on the build host.
18# RPM_FILE_CHECKSUM_DIGEST
19# Optional variable for specifying the algorithm for generating file
20# checksum digest.
21# RPM_FSK_PATH
22# Optional variable for the file signing key.
23# RPM_FSK_PASSWORD
24# Optional variable for the file signing key password.
25# GPG_BIN
26# Optional variable for specifying the gpg binary/wrapper to use for
27# signing.
28# RPM_GPG_SIGN_CHUNK
29# Optional variable indicating the number of packages used per gpg
30# invocation
31# GPG_PATH
32#   Optional variable for specifying the gnupg "home" directory.
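#
# A minimal usage sketch (the key name and passphrase are hypothetical),
# typically set in local.conf; note RPM takes the passphrase itself, not a file:
#   INHERIT += "sign_rpm"
#   RPM_GPG_NAME = "rpm-signing-key"
#   RPM_GPG_PASSPHRASE = "secret-passphrase"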
33
34inherit sanity
35
36RPM_SIGN_PACKAGES = '1'
37RPM_SIGN_FILES ?= '0'
38RPM_GPG_BACKEND ?= 'local'
39# SHA-256 is used by default
40RPM_FILE_CHECKSUM_DIGEST ?= '8'
41RPM_GPG_SIGN_CHUNK ?= "${BB_NUMBER_THREADS}"
42
43
44python () {
45 if d.getVar('RPM_GPG_PASSPHRASE_FILE'):
46 raise_sanity_error('RPM_GPG_PASSPHRASE_FILE is replaced by RPM_GPG_PASSPHRASE', d)
47 # Check configuration
48 for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE'):
49 if not d.getVar(var):
50 raise_sanity_error("You need to define %s in the config" % var, d)
51
52 if d.getVar('RPM_SIGN_FILES') == '1':
53 for var in ('RPM_FSK_PATH', 'RPM_FSK_PASSWORD'):
54 if not d.getVar(var):
55 raise_sanity_error("You need to define %s in the config" % var, d)
56}
57
58python sign_rpm () {
59 import glob
60 from oe.gpg_sign import get_signer
61
62 signer = get_signer(d, d.getVar('RPM_GPG_BACKEND'))
63 rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR') + '/*')
64
65 signer.sign_rpms(rpms,
66 d.getVar('RPM_GPG_NAME'),
67 d.getVar('RPM_GPG_PASSPHRASE'),
68 d.getVar('RPM_FILE_CHECKSUM_DIGEST'),
69 int(d.getVar('RPM_GPG_SIGN_CHUNK')),
70 d.getVar('RPM_FSK_PATH'),
71 d.getVar('RPM_FSK_PASSWORD'))
72}
73sign_rpm[vardepsexclude] += "RPM_GPG_SIGN_CHUNK"
74
75do_package_index[depends] += "signing-keys:do_deploy"
76do_rootfs[depends] += "signing-keys:do_populate_sysroot"
77
78PACKAGE_WRITE_DEPS += "gnupg-native"
diff --git a/meta/classes/spdx-common.bbclass b/meta/classes/spdx-common.bbclass
deleted file mode 100644
index ca0416d1c7..0000000000
--- a/meta/classes/spdx-common.bbclass
+++ /dev/null
@@ -1,107 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6
7SPDX_VERSION ??= ""
8DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${SPDX_VERSION}"
9
10# The product name that the CVE database uses. Defaults to BPN, but may need to
11# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
12CVE_PRODUCT ??= "${BPN}"
13CVE_VERSION ??= "${PV}"
14
15SPDXDIR ??= "${WORKDIR}/spdx/${SPDX_VERSION}"
16SPDXDEPLOY = "${SPDXDIR}/deploy"
17SPDXWORK = "${SPDXDIR}/work"
18SPDXIMAGEWORK = "${SPDXDIR}/image-work"
19SPDXSDKWORK = "${SPDXDIR}/sdk-work"
20SPDXSDKEXTWORK = "${SPDXDIR}/sdk-ext-work"
21SPDXDEPS = "${SPDXDIR}/deps.json"
22
23SPDX_TOOL_NAME ??= "oe-spdx-creator"
24SPDX_TOOL_VERSION ??= "1.0"
25
26SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
27
28SPDX_INCLUDE_SOURCES ??= "0"
29SPDX_INCLUDE_COMPILED_SOURCES ??= "0"
30
31SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
32SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdocs"
33SPDX_PRETTY ??= "0"
34
35SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
36
37SPDX_CUSTOM_ANNOTATION_VARS ??= ""
38
39SPDX_MULTILIB_SSTATE_ARCHS ??= "${SSTATE_ARCHS}"
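# A minimal opt-in sketch (illustrative values), e.g. in local.conf:
#   INHERIT += "create-spdx"
#   SPDX_PRETTY = "1"
#   SPDX_INCLUDE_SOURCES = "1"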
40
41python () {
42 from oe.cve_check import extend_cve_status
43 extend_cve_status(d)
44 if d.getVar("SPDX_INCLUDE_COMPILED_SOURCES") == "1":
45 d.setVar("SPDX_INCLUDE_SOURCES", "1")
46}
47
48def create_spdx_source_deps(d):
49 import oe.spdx_common
50
51 deps = []
52 if d.getVar("SPDX_INCLUDE_SOURCES") == "1":
53 pn = d.getVar('PN')
54 # do_unpack is a hack for now; we only need it to get the
55 # dependencies do_unpack already has so we can extract the source
56 # ourselves
57 if oe.spdx_common.has_task(d, "do_unpack"):
58 deps.append("%s:do_unpack" % pn)
59
60 if oe.spdx_common.is_work_shared_spdx(d) and \
61 oe.spdx_common.process_sources(d):
62 # For kernel source code
63 if oe.spdx_common.has_task(d, "do_shared_workdir"):
64 deps.append("%s:do_shared_workdir" % pn)
65 elif d.getVar('S') == d.getVar('STAGING_KERNEL_DIR'):
66 deps.append("virtual/kernel:do_shared_workdir")
67
68 # For gcc-source-${PV} source code
69 if oe.spdx_common.has_task(d, "do_preconfigure"):
70 deps.append("%s:do_preconfigure" % pn)
71 elif oe.spdx_common.has_task(d, "do_patch"):
72 deps.append("%s:do_patch" % pn)
73 # For gcc-cross-x86_64 source code
74 elif oe.spdx_common.has_task(d, "do_configure"):
75 deps.append("%s:do_configure" % pn)
76
77 return " ".join(deps)
78
79
80python do_collect_spdx_deps() {
81 # This task calculates the build time dependencies of the recipe, and is
82 # required because while a task can deptask on itself, those dependencies
83 # do not show up in BB_TASKDEPDATA. To work around that, this task does the
84 # deptask on do_create_spdx and writes out the dependencies it finds, then
85 # do_create_spdx reads in the found dependencies when writing the actual
86 # SPDX document
87 import json
88 import oe.spdx_common
89 from pathlib import Path
90
91 spdx_deps_file = Path(d.getVar("SPDXDEPS"))
92
93 deps = oe.spdx_common.collect_direct_deps(d, "do_create_spdx")
94
95 with spdx_deps_file.open("w") as f:
96 json.dump(deps, f)
97}
98# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies so we can archive the source
99addtask do_collect_spdx_deps after do_unpack
100do_collect_spdx_deps[depends] += "${PATCHDEPENDENCY}"
101do_collect_spdx_deps[deptask] = "do_create_spdx"
102do_collect_spdx_deps[dirs] = "${SPDXDIR}"
103
104oe.spdx_common.collect_direct_deps[vardepsexclude] += "BB_TASKDEPDATA"
105oe.spdx_common.collect_direct_deps[vardeps] += "DEPENDS"
106oe.spdx_common.collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
107oe.spdx_common.get_patched_src[vardepsexclude] += "STAGING_KERNEL_DIR"
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
deleted file mode 100644
index 2dfc7db255..0000000000
--- a/meta/classes/terminal.bbclass
+++ /dev/null
@@ -1,115 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7OE_TERMINAL ?= 'auto'
8OE_TERMINAL[type] = 'choice'
9OE_TERMINAL[choices] = 'auto none \
10 ${@oe_terminal_prioritized()}'
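# For example, to pin devshell to one specific terminal (assuming that
# terminal is available on the build host), one might set in local.conf:
#   OE_TERMINAL = "tmux"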
11
12OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE CACHED_CONFIGUREVARS CONFIGUREOPTS EXTRA_OECONF'
13OE_TERMINAL_EXPORTS[type] = 'list'
14
15XAUTHORITY ?= "${HOME}/.Xauthority"
16SHELL ?= "bash"
17
18def oe_terminal_prioritized():
19 import oe.terminal
20 return " ".join(o.name for o in oe.terminal.prioritized())
21
22def emit_terminal_func(command, envdata, d):
23 import bb.build
24 cmd_func = 'do_terminal'
25
26 envdata.setVar(cmd_func, 'exec ' + command)
27 envdata.setVarFlag(cmd_func, 'func', '1')
28
29 runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
30 runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
31 runfile = os.path.join(d.getVar('T'), runfile)
32 bb.utils.mkdirhier(os.path.dirname(runfile))
33
34 with open(runfile, 'w') as script:
35 # Override the shell shell_trap_code specifies.
36 # If our shell is bash, we might well face silent death.
37 script.write("#!/bin/bash\n")
38 script.write(bb.build.shell_trap_code())
39 bb.data.emit_func(cmd_func, script, envdata)
40 script.write(cmd_func)
41 script.write("\n")
42 os.chmod(runfile, 0o755)
43
44 return runfile
45
46def oe_terminal(command, title, d):
47 import oe.data
48 import oe.terminal
49
50 envdata = bb.data.init()
51
52 for v in os.environ:
53 envdata.setVar(v, os.environ[v])
54 envdata.setVarFlag(v, 'export', '1')
55
56 for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
57 value = d.getVar(export)
58 if value is not None:
59 os.environ[export] = str(value)
60 envdata.setVar(export, str(value))
61 envdata.setVarFlag(export, 'export', '1')
62 if export == "PSEUDO_DISABLED":
63 if "PSEUDO_UNLOAD" in os.environ:
64 del os.environ["PSEUDO_UNLOAD"]
65 envdata.delVar("PSEUDO_UNLOAD")
66
67 # Add in all variables from the user's original environment which
68    # haven't subsequently been set/changed
69 origbbenv = d.getVar("BB_ORIGENV", False) or {}
70 for key in origbbenv:
71 if key in envdata:
72 continue
73 value = origbbenv.getVar(key)
74 if value is not None:
75 os.environ[key] = str(value)
76 envdata.setVar(key, str(value))
77 envdata.setVarFlag(key, 'export', '1')
78
79 # Use original PATH as a fallback
80 path = d.getVar('PATH') + ":" + origbbenv.getVar('PATH')
81 os.environ['PATH'] = path
82 envdata.setVar('PATH', path)
83
84 # A complex PS1 might need more escaping of chars.
85    # Let's simply not export PS1 instead.
86 envdata.delVar("PS1")
87
88 # Replace command with an executable wrapper script
89 command = emit_terminal_func(command, envdata, d)
90
91 terminal = oe.data.typed_value('OE_TERMINAL', d).lower()
92 if terminal == 'none':
93 bb.fatal('Devshell usage disabled with OE_TERMINAL')
94 elif terminal != 'auto':
95 try:
96 oe.terminal.spawn(terminal, command, title, None, d)
97 return
98 except oe.terminal.UnsupportedTerminal:
99 bb.warn('Unsupported terminal "%s", defaulting to "auto"' %
100 terminal)
101 except oe.terminal.ExecutionError as exc:
102 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
103
104 try:
105 oe.terminal.spawn_preferred(command, title, None, d)
106 except oe.terminal.NoSupportedTerminals as nosup:
107 nosup.terms.remove("false")
108 cmds = '\n\t'.join(nosup.terms).replace("{command}",
109 "do_terminal").replace("{title}", title)
110 bb.fatal('No valid terminal found, unable to open devshell.\n' +
111 'Tried the following commands:\n\t%s' % cmds)
112 except oe.terminal.ExecutionError as exc:
113 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
114
115oe_terminal[vardepsexclude] = "BB_ORIGENV"
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
deleted file mode 100644
index af7c457808..0000000000
--- a/meta/classes/toaster.bbclass
+++ /dev/null
@@ -1,388 +0,0 @@
1#
2# Toaster helper class
3#
4# Copyright (C) 2013 Intel Corporation
5#
6# SPDX-License-Identifier: MIT
7#
8# This bbclass is designed to extract data used by OE-Core during the build process,
9# for recording in the Toaster system.
10# The data access is synchronous, preserving the build data integrity across
11# different builds.
12#
13# The data is transferred through the event system, using the MetadataEvent objects.
14#
15# The model is to enable the datadump functions as postfuncs, and have the dump
16# executed after the real taskfunc has been executed. This prevents task signatures
17# from changing whether or not toaster is enabled. Build performance is not affected if Toaster is not enabled.
18#
19# To enable, use INHERIT in local.conf:
20#
21# INHERIT += "toaster"
22#
23#
24#
25#
26
27# Find and dump layer info when we got the layers parsed
28
29
30
31python toaster_layerinfo_dumpdata() {
32 import subprocess
33
34 def _get_git_branch(layer_path):
35 branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0]
36 branch = branch.decode('utf-8')
37 branch = branch.replace('refs/heads/', '').rstrip()
38 return branch
39
40 def _get_git_revision(layer_path):
41 revision = subprocess.Popen("git rev-parse HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
42 return revision
43
44 def _get_url_map_name(layer_name):
45        """ Some layers have a different name on the openembedded.org site;
46 this method returns the correct name to use in the URL
47 """
48
49 url_name = layer_name
50 url_mapping = {'meta': 'openembedded-core'}
51
52 for key in url_mapping.keys():
53 if key == layer_name:
54 url_name = url_mapping[key]
55
56 return url_name
57
58 def _get_layer_version_information(layer_path):
59
60 layer_version_info = {}
61 layer_version_info['branch'] = _get_git_branch(layer_path)
62 layer_version_info['commit'] = _get_git_revision(layer_path)
63 layer_version_info['priority'] = 0
64
65 return layer_version_info
66
67
68 def _get_layer_dict(layer_path):
69
70 layer_info = {}
71 layer_name = layer_path.split('/')[-1]
72 layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/'
73 layer_url_name = _get_url_map_name(layer_name)
74
75 layer_info['name'] = layer_url_name
76 layer_info['local_path'] = layer_path
77 layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name)
78 layer_info['version'] = _get_layer_version_information(layer_path)
79
80 return layer_info
81
82
83 bblayers = e.data.getVar("BBLAYERS")
84
85 llayerinfo = {}
86
87 for layer in { l for l in bblayers.strip().split() if len(l) }:
88 llayerinfo[layer] = _get_layer_dict(layer)
89
90
91 bb.event.fire(bb.event.MetadataEvent("LayerInfo", llayerinfo), e.data)
92}
93
94# Dump package file info data
95
96def _toaster_load_pkgdatafile(dirpath, filepath):
97 import json
98 import re
99 pkgdata = {}
100 with open(os.path.join(dirpath, filepath), "r") as fin:
101 for line in fin:
102 try:
103 kn, kv = line.strip().split(": ", 1)
104 m = re.match(r"^PKG:([^A-Z:]*)", kn)
105 if m:
106 pkgdata['OPKGN'] = m.group(1)
107 kn = kn.split(":")[0]
108 pkgdata[kn] = kv
109 if kn.startswith('FILES_INFO'):
110 pkgdata[kn] = json.loads(kv)
111
112 except ValueError:
113 pass # ignore lines without valid key: value pairs
114 return pkgdata
115
116def _toaster_dumpdata(pkgdatadir, d):
117 """
118 Dumps the data about the packages created by a recipe
119 """
120
121 # No need to try and dumpdata if the recipe isn't generating packages
122 if not d.getVar('PACKAGES'):
123 return
124
125 lpkgdata = {}
126 datadir = os.path.join(pkgdatadir, 'runtime')
127
128 # scan and send data for each generated package
129 if os.path.exists(datadir):
130 for datafile in os.listdir(datadir):
131 if not datafile.endswith('.packaged'):
132 lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
133 # Fire an event containing the pkg data
134 bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
135
136python toaster_package_dumpdata() {
137 _toaster_dumpdata(d.getVar('PKGDESTWORK'), d)
138}
139
140python toaster_packagedata_dumpdata() {
141 # This path needs to match do_packagedata[sstate-inputdirs]
142 _toaster_dumpdata(os.path.join(d.getVar('WORKDIR'), 'pkgdata-pdata-input'), d)
143}
144
145# Dump output SDK artifact information
146
147python toaster_artifact_dumpdata() {
148 """
149 Dump data about SDK variables
150 """
151
152 event_data = {
153 "TOOLCHAIN_OUTPUTNAME": d.getVar("TOOLCHAIN_OUTPUTNAME")
154 }
155
156 bb.event.fire(bb.event.MetadataEvent("SDKArtifactInfo", event_data), d)
157}
158
159# Collect the list of buildstats files based on fired events; when the build completes, read all stats and fire an event with the collected data
160
161python toaster_collect_task_stats() {
162 import bb.build
163 import bb.event
164 import bb.data
165 import bb.utils
166 import os
167
168 if not e.data.getVar('BUILDSTATS_BASE'):
169 return # if we don't have buildstats, we cannot collect stats
170
171 toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE'), "toasterstatlist")
172
173 def stat_to_float(value):
174 return float(value.strip('% \n\r'))
175
176 def _append_read_list(v):
177 lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True)
178
179 with open(toaster_statlist_file, "a") as fout:
180 taskdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}/${PF}")
181 fout.write("%s::%s::%s::%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}")))
182
183 bb.utils.unlockfile(lock)
184
185 def _read_stats(filename):
186 # seconds
187 cpu_time_user = 0
188 cpu_time_system = 0
189
190 # bytes
191 disk_io_read = 0
192 disk_io_write = 0
193
194 started = 0
195 ended = 0
196
197 taskname = ''
198
199 statinfo = {}
200
201 with open(filename, 'r') as task_bs:
202 for line in task_bs.readlines():
203 k,v = line.strip().split(": ", 1)
204 statinfo[k] = v
205
206 if "Started" in statinfo:
207 started = stat_to_float(statinfo["Started"])
208
209 if "Ended" in statinfo:
210 ended = stat_to_float(statinfo["Ended"])
211
212 if "Child rusage ru_utime" in statinfo:
213 cpu_time_user = cpu_time_user + stat_to_float(statinfo["Child rusage ru_utime"])
214
215 if "Child rusage ru_stime" in statinfo:
216 cpu_time_system = cpu_time_system + stat_to_float(statinfo["Child rusage ru_stime"])
217
218 if "IO write_bytes" in statinfo:
219 write_bytes = int(statinfo["IO write_bytes"].strip('% \n\r'))
220 disk_io_write = disk_io_write + write_bytes
221
222 if "IO read_bytes" in statinfo:
223 read_bytes = int(statinfo["IO read_bytes"].strip('% \n\r'))
224 disk_io_read = disk_io_read + read_bytes
225
226 return {
227 'stat_file': filename,
228 'cpu_time_user': cpu_time_user,
229 'cpu_time_system': cpu_time_system,
230 'disk_io_read': disk_io_read,
231 'disk_io_write': disk_io_write,
232 'started': started,
233 'ended': ended
234 }
235
236 if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
237 _append_read_list(e)
238 pass
239
240 if isinstance(e, bb.event.BuildCompleted) and os.path.exists(toaster_statlist_file):
241 events = []
242 with open(toaster_statlist_file, "r") as fin:
243 for line in fin:
244 (taskfile, taskname, filename, recipename) = line.strip().split("::")
245 stats = _read_stats(filename)
246 events.append((taskfile, taskname, stats, recipename))
247 bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data)
248 os.unlink(toaster_statlist_file)
249}
250
251# dump relevant build history data as an event when the build is completed
252
253python toaster_buildhistory_dump() {
254 import re
255 BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
256 BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
257 pkgdata_dir = e.data.getVar("PKGDATA_DIR")
258
259
260 # scan the build targets for this build
261 images = {}
262 allpkgs = {}
263 files = {}
264 for target in e._pkgs:
265 target = target.split(':')[0] # strip ':<task>' suffix from the target
266 installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target))
267 if os.path.exists(installed_img_path):
268 images[target] = {}
269 files[target] = {}
270 files[target]['dirs'] = []
271 files[target]['syms'] = []
272 files[target]['files'] = []
273 with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
274 for line in fin:
275 line = line.rstrip(";")
276 psize, punit, pname = line.split()
277 # this size is "installed-size" as it measures how much space it takes on disk
278 images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}
279
280 with open("%s/depends.dot" % installed_img_path, "r") as fin:
281 p = re.compile(r'\s*"(?P<name>[^"]+)"\s*->\s*"(?P<dep>[^"]+)"(?P<rec>.*?\[style=dotted\])?')
282 for line in fin:
283 m = p.match(line)
284 if not m:
285 continue
286 pname = m.group('name')
287 dependsname = m.group('dep')
288 deptype = 'recommends' if m.group('rec') else 'depends'
289
290 # If RPM is used for packaging, then there may be
291 # dependencies such as "/bin/sh", which will confuse
292 # _toaster_load_pkgdatafile() later on. While at it, ignore
293 # any dependencies that contain parentheses, e.g.,
294 # "libc.so.6(GLIBC_2.7)".
295 if dependsname.startswith('/') or '(' in dependsname:
296 continue
297
298 if not pname in images[target]:
299 images[target][pname] = {'size': 0, 'depends' : []}
300 if not dependsname in images[target]:
301 images[target][dependsname] = {'size': 0, 'depends' : []}
302 images[target][pname]['depends'].append((dependsname, deptype))
303
304 # files-in-image.txt is only generated if an image file is created,
305 # so the file entries ('syms', 'dirs', 'files') for a target will be
306 # empty for rootfs builds and other "image" tasks which don't
307 # produce image files
308 # (e.g. "bitbake core-image-minimal -c populate_sdk")
309 files_in_image_path = "%s/files-in-image.txt" % installed_img_path
310 if os.path.exists(files_in_image_path):
311 with open(files_in_image_path, "r") as fin:
312 for line in fin:
313 lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
314 if lc[0].startswith("l"):
315 files[target]['syms'].append(lc)
316 elif lc[0].startswith("d"):
317 files[target]['dirs'].append(lc)
318 else:
319 files[target]['files'].append(lc)
320
321 for pname in images[target]:
322 if not pname in allpkgs:
323 try:
324 pkgdata = _toaster_load_pkgdatafile("%s/runtime-reverse/" % pkgdata_dir, pname)
325 except IOError as err:
326 if err.errno == 2:
327 # We expect this e.g. for RRECOMMENDS that are unsatisfied at runtime
328 continue
329 else:
330 raise
331 allpkgs[pname] = pkgdata
332
333
334 data = { 'pkgdata' : allpkgs, 'imgdata' : images, 'filedata' : files }
335
336 bb.event.fire(bb.event.MetadataEvent("ImagePkgList", data), e.data)
337
338}
339
340# get list of artifacts from sstate manifest
341python toaster_artifacts() {
342 if e.taskname in ["do_deploy", "do_image_complete", "do_populate_sdk", "do_populate_sdk_ext"]:
343 d2 = d.createCopy()
344 d2.setVar('FILE', e.taskfile)
345        # Use 'stamp-extra-info' if present, else use a workaround
346 # to determine 'SSTATE_MANMACH'
347 extrainf = d2.getVarFlag(e.taskname, 'stamp-extra-info')
348 if extrainf:
349 d2.setVar('SSTATE_MANMACH', extrainf)
350 else:
351 if "do_populate_sdk" == e.taskname:
352 d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}${SDKMACHINE}"))
353 else:
354 d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}"))
355 manifest = oe.sstatesig.sstate_get_manifest_filename(e.taskname[3:], d2)[0]
356
357 if os.access(manifest, os.R_OK):
358 with open(manifest) as fmanifest:
359 artifacts = [fname.strip() for fname in fmanifest]
360 data = {"task": e.taskid, "artifacts": artifacts}
361 bb.event.fire(bb.event.MetadataEvent("TaskArtifacts", data), d2)
362}
363
364# set event handlers
365addhandler toaster_layerinfo_dumpdata
366toaster_layerinfo_dumpdata[eventmask] = "bb.event.TreeDataPreparationCompleted"
367
368addhandler toaster_collect_task_stats
369toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSucceeded bb.build.TaskFailed"
370
371addhandler toaster_buildhistory_dump
372toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
373
374addhandler toaster_artifacts
375toaster_artifacts[eventmask] = "bb.runqueue.runQueueTaskSkipped bb.runqueue.runQueueTaskCompleted"
376
377do_packagedata_setscene[postfuncs] += "toaster_packagedata_dumpdata "
378do_packagedata_setscene[vardepsexclude] += "toaster_packagedata_dumpdata "
379
380do_package[postfuncs] += "toaster_package_dumpdata "
381do_package[vardepsexclude] += "toaster_package_dumpdata "
382
383#do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
384#do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
385
386#do_populate_sdk_ext[postfuncs] += "toaster_artifact_dumpdata "
387#do_populate_sdk_ext[vardepsexclude] += "toaster_artifact_dumpdata "
388
diff --git a/meta/classes/toolchain/clang-native.bbclass b/meta/classes/toolchain/clang-native.bbclass
deleted file mode 100644
index 006be9fadd..0000000000
--- a/meta/classes/toolchain/clang-native.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
1BUILD_CC = "${CCACHE}${BUILD_PREFIX}clang ${BUILD_CC_ARCH}"
2BUILD_CXX = "${CCACHE}${BUILD_PREFIX}clang++ ${BUILD_CC_ARCH}"
3BUILD_FC = "${BUILD_PREFIX}gfortran ${BUILD_CC_ARCH}"
4BUILD_CPP = "${BUILD_PREFIX}clang ${BUILD_CC_ARCH} -E"
5BUILD_LD = "${BUILD_PREFIX}ld ${BUILD_LD_ARCH}"
6BUILD_CCLD = "${BUILD_PREFIX}clang ${BUILD_CC_ARCH}"
7BUILD_AR = "${BUILD_PREFIX}llvm-ar"
8BUILD_AS = "${BUILD_PREFIX}as ${BUILD_AS_ARCH}"
9BUILD_RANLIB = "${BUILD_PREFIX}llvm-ranlib -D"
10BUILD_STRIP = "${BUILD_PREFIX}llvm-strip"
11BUILD_OBJCOPY = "${BUILD_PREFIX}llvm-objcopy"
12BUILD_OBJDUMP = "${BUILD_PREFIX}llvm-objdump"
13BUILD_NM = "${BUILD_PREFIX}llvm-nm"
14BUILD_READELF = "${BUILD_PREFIX}llvm-readelf"
15
16DEPENDS += "clang-native libcxx-native compiler-rt-native"
17
18BUILD_LDFLAGS += " --rtlib=libgcc --unwindlib=libgcc"
diff --git a/meta/classes/toolchain/clang.bbclass b/meta/classes/toolchain/clang.bbclass
deleted file mode 100644
index 9a3cd0e584..0000000000
--- a/meta/classes/toolchain/clang.bbclass
+++ /dev/null
@@ -1,40 +0,0 @@
1CC = "${CCACHE}${HOST_PREFIX}clang ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
2CXX = "${CCACHE}${HOST_PREFIX}clang++ ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
3FC = "${HOST_PREFIX}gfortran ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
4CPP = "${CCACHE}${HOST_PREFIX}clang ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} -E"
5LD = "${@bb.utils.contains('DISTRO_FEATURES', 'ld-is-lld', '${HOST_PREFIX}ld.lld${TOOLCHAIN_OPTIONS} ${HOST_LD_ARCH}', '${HOST_PREFIX}ld${TOOLCHAIN_OPTIONS} ${HOST_LD_ARCH}', d)}"
6CCLD = "${CCACHE}${HOST_PREFIX}clang ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
7RANLIB = "${HOST_PREFIX}llvm-ranlib"
8AR = "${HOST_PREFIX}llvm-ar"
9AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
10STRIP = "${HOST_PREFIX}llvm-strip"
11OBJCOPY = "${HOST_PREFIX}llvm-objcopy"
12OBJDUMP = "${HOST_PREFIX}llvm-objdump"
13STRINGS = "${HOST_PREFIX}llvm-strings"
14NM = "${HOST_PREFIX}llvm-nm"
15READELF = "${HOST_PREFIX}llvm-readelf"
16
17PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-cc = "${MLPREFIX}clang-cross-${TARGET_ARCH}"
18PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-c++ = "${MLPREFIX}clang-cross-${TARGET_ARCH}"
19PREFERRED_PROVIDER_virtual/${MLPREFIX}compilerlibs = "${MLPREFIX}gcc-runtime"
20PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-cc:class-nativesdk = "clang-crosssdk-${SDK_SYS}"
21PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-c++:class-nativesdk = "clang-crosssdk-${SDK_SYS}"
22
23PREFERRED_PROVIDER_virtual/nativesdk-cross-cc:class-crosssdk = "clang-crosssdk-${SDK_SYS}"
24PREFERRED_PROVIDER_virtual/nativesdk-cross-c++:class-crosssdk = "clang-crosssdk-${SDK_SYS}"
25
26PREFERRED_PROVIDER_virtual/nativesdk-cross-cc:class-cross-canadian = "clang-crosssdk-${SDK_SYS}"
27PREFERRED_PROVIDER_virtual/nativesdk-cross-c++:class-cross-canadian = "clang-crosssdk-${SDK_SYS}"
28
29BASE_DEFAULT_DEPS:append = " compiler-rt libcxx"
30
31TUNE_CCARGS += "${@bb.utils.contains("DISTRO_FEATURES", "usrmerge", " --dyld-prefix=/usr", "", d)}"
32
33LDFLAGS:append:class-nativesdk:x86-64 = " -Wl,-dynamic-linker,${base_libdir}/ld-linux-x86-64.so.2"
34LDFLAGS:append:class-nativesdk:aarch64 = " -Wl,-dynamic-linker,${base_libdir}/ld-linux-aarch64.so.1"
35LDFLAGS:append:class-cross-canadian = " -Wl,-dynamic-linker,${base_libdir}/placeholder/to/be/rewritten/by/sdk/installer"
36
37# do_populate_sysroot needs STRIP, do_package_qa needs OBJDUMP
38POPULATESYSROOTDEPS:append:class-target = " llvm-native:do_populate_sysroot"
39
40TCOVERRIDE = "toolchain-clang"
diff --git a/meta/classes/toolchain/gcc-native.bbclass b/meta/classes/toolchain/gcc-native.bbclass
deleted file mode 100644
index a708bd0389..0000000000
--- a/meta/classes/toolchain/gcc-native.bbclass
+++ /dev/null
@@ -1,15 +0,0 @@
1BUILD_CC = "${CCACHE}${BUILD_PREFIX}gcc ${BUILD_CC_ARCH}"
2BUILD_CXX = "${CCACHE}${BUILD_PREFIX}g++ ${BUILD_CC_ARCH}"
3BUILD_FC = "${BUILD_PREFIX}gfortran ${BUILD_CC_ARCH}"
4BUILD_CPP = "${BUILD_PREFIX}gcc ${BUILD_CC_ARCH} -E"
5BUILD_LD = "${BUILD_PREFIX}ld ${BUILD_LD_ARCH}"
6BUILD_CCLD = "${BUILD_PREFIX}gcc ${BUILD_CC_ARCH}"
7BUILD_AR = "${BUILD_PREFIX}ar"
8BUILD_AS = "${BUILD_PREFIX}as ${BUILD_AS_ARCH}"
9BUILD_RANLIB = "${BUILD_PREFIX}ranlib -D"
10BUILD_STRIP = "${BUILD_PREFIX}strip"
11BUILD_OBJCOPY = "${BUILD_PREFIX}objcopy"
12BUILD_OBJDUMP = "${BUILD_PREFIX}objdump"
13BUILD_NM = "${BUILD_PREFIX}nm"
14BUILD_READELF = "${BUILD_PREFIX}readelf"
15
diff --git a/meta/classes/toolchain/gcc.bbclass b/meta/classes/toolchain/gcc.bbclass
deleted file mode 100644
index a5adb5ca37..0000000000
--- a/meta/classes/toolchain/gcc.bbclass
+++ /dev/null
@@ -1,33 +0,0 @@
1CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
2CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
3FC = "${HOST_PREFIX}gfortran ${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
4CPP = "${HOST_PREFIX}gcc -E${TOOLCHAIN_OPTIONS} ${HOST_CC_ARCH}"
5LD = "${HOST_PREFIX}ld${TOOLCHAIN_OPTIONS} ${HOST_LD_ARCH}"
6CCLD = "${CC}"
7AR = "${HOST_PREFIX}gcc-ar"
8AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
9RANLIB = "${HOST_PREFIX}gcc-ranlib"
10STRIP = "${HOST_PREFIX}strip"
11OBJCOPY = "${HOST_PREFIX}objcopy"
12OBJDUMP = "${HOST_PREFIX}objdump"
13STRINGS = "${HOST_PREFIX}strings"
14NM = "${HOST_PREFIX}gcc-nm"
15READELF = "${HOST_PREFIX}readelf"
16
17PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-cc = "${MLPREFIX}gcc-cross-${TARGET_ARCH}"
18PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-c++ = "${MLPREFIX}gcc-cross-${TARGET_ARCH}"
19PREFERRED_PROVIDER_virtual/${MLPREFIX}compilerlibs = "${MLPREFIX}gcc-runtime"
20
21PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-cc:class-nativesdk = "gcc-crosssdk-${SDK_SYS}"
22PREFERRED_PROVIDER_virtual/${MLPREFIX}cross-c++:class-nativesdk = "gcc-crosssdk-${SDK_SYS}"
23PREFERRED_PROVIDER_virtual/${MLPREFIX}compilerlibs:class-nativesdk = "nativesdk-gcc-runtime"
24
25PREFERRED_PROVIDER_virtual/nativesdk-cross-cc:class-crosssdk = "gcc-crosssdk-${SDK_SYS}"
26PREFERRED_PROVIDER_virtual/nativesdk-cross-c++:class-crosssdk = "gcc-crosssdk-${SDK_SYS}"
27PREFERRED_PROVIDER_virtual/nativesdk-compilerlibs:class-crosssdk = "nativesdk-gcc-runtime"
28
29PREFERRED_PROVIDER_virtual/nativesdk-cross-cc:class-cross-canadian = "gcc-crosssdk-${SDK_SYS}"
30PREFERRED_PROVIDER_virtual/nativesdk-cross-c++:class-cross-canadian = "gcc-crosssdk-${SDK_SYS}"
31PREFERRED_PROVIDER_virtual/nativesdk-compilerlibs:class-cross-canadian = "nativesdk-gcc-runtime"
32
33TCOVERRIDE = "toolchain-gcc"
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
deleted file mode 100644
index 160f7a024b..0000000000
--- a/meta/classes/typecheck.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Check types of bitbake configuration variables
8#
9# See oe.types for details.
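# As an illustration (MY_FEATURE is hypothetical), a variable declares its
# type via a varflag and is then validated when the configuration is parsed:
#   MY_FEATURE[type] = "boolean"
#   MY_FEATURE = "yes"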
10
11python check_types() {
12 import oe.types
13 for key in e.data.keys():
14 if e.data.getVarFlag(key, "type"):
15 oe.data.typed_value(key, e.data)
16}
17addhandler check_types
18check_types[eventmask] = "bb.event.ConfigParsed"
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
deleted file mode 100644
index 1dbcba2bf1..0000000000
--- a/meta/classes/useradd-staticids.bbclass
+++ /dev/null
@@ -1,313 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# In order to support a deterministic set of 'dynamic' users/groups,
8# we need a function to reformat the params based on a static file
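# A typical configuration sketch (the table paths are hypothetical); the
# tables are resolved via BBPATH:
#   USERADD_ERROR_DYNAMIC = "error"
#   USERADD_UID_TABLES = "files/static-passwd"
#   USERADD_GID_TABLES = "files/static-group"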
9def update_useradd_static_config(d):
10 import itertools
11 import re
12 import errno
13 import oe.useradd
14
15 def list_extend(iterable, length, obj = None):
16 """Ensure that iterable is the specified length by extending with obj
17 and return it as a list"""
18 return list(itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length))
19
20 def merge_files(file_list, exp_fields):
21 """Read each passwd/group file in file_list, split each line and create
22 a dictionary with the user/group names as keys and the split lines as
23 values. If the user/group name already exists in the dictionary, then
24 update any fields in the list with the values from the new list (if they
25 are set)."""
26 id_table = dict()
27 for conf in file_list.split():
28 try:
29 with open(conf, "r") as f:
30 for line in f:
31 if line.startswith('#'):
32 continue
33                        # Make sure there are always at least exp_fields
34 # elements in the field list. This allows for leaving
35 # out trailing colons in the files.
36 fields = list_extend(line.rstrip().split(":"), exp_fields)
37 if fields[0] not in id_table:
38 id_table[fields[0]] = fields
39 else:
40 id_table[fields[0]] = list(map(lambda x, y: x or y, fields, id_table[fields[0]]))
41 except IOError as e:
42 if e.errno == errno.ENOENT:
43 pass
44
45 return id_table
46
47 def handle_missing_id(id, type, pkg, files, var, value):
48 # For backwards compatibility we accept "1" in addition to "error"
49 error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
50 msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
51 if files:
52 msg += " Add %s to one of these files: %s" % (id, files)
53 else:
54 msg += " %s file(s) not found in BBPATH: %s" % (var, value)
55 if error_dynamic == 'error' or error_dynamic == '1':
56 raise NotImplementedError(msg)
57 elif error_dynamic == 'warn':
58 bb.warn(msg)
59 elif error_dynamic == 'skip':
60 raise bb.parse.SkipRecipe(msg)
61
62    # Return a list of configuration files, based on the contents of
63    # USERADD_GID_TABLES (default files/group) or
64    # USERADD_UID_TABLES (default files/passwd).
65 # Paths are resolved via BBPATH.
66 def get_table_list(d, var, default):
67 files = []
68 bbpath = d.getVar('BBPATH')
69 tables = d.getVar(var)
70 if not tables:
71 tables = default
72 for conf_file in tables.split():
73 files.append(bb.utils.which(bbpath, conf_file))
74 return (' '.join(files), var, default)
75
76 # We parse and rewrite the useradd components
77 def rewrite_useradd(params, is_pkg):
78 parser = oe.useradd.build_useradd_parser()
79
80 newparams = []
81 users = None
82 for param in oe.useradd.split_commands(params):
83 try:
84 uaargs = parser.parse_args(oe.useradd.split_args(param))
85 except Exception as e:
86 bb.fatal("%s: Unable to parse arguments for USERADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
87
88 # Read all passwd files specified in USERADD_UID_TABLES or files/passwd
89 # Use the standard passwd layout:
90 # username:password:user_id:group_id:comment:home_directory:login_shell
91 #
92 # If a field is left blank, the original value will be used. The 'username'
93 # field is required.
94 #
95 # Note: we ignore the password field, as including even the hashed password
96 # in the useradd command may introduce a security hole. It's assumed that
97 # all new users get the default ('*' which prevents login) until the user is
98 # specifically configured by the system admin.
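            # Example files/passwd entry in such a table (hypothetical values):
            #   myuser:*:1200:1200:My User:/home/myuser:/bin/sh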
99 if not users:
100 files, table_var, table_value = get_table_list(d, 'USERADD_UID_TABLES', 'files/passwd')
101 users = merge_files(files, 7)
102
103 type = 'system user' if uaargs.system else 'normal user'
104 if uaargs.LOGIN not in users:
105 handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
106 newparams.append(param)
107 continue
108
109 field = users[uaargs.LOGIN]
110
111 if uaargs.uid and field[2] and (uaargs.uid != field[2]):
112 bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.uid, field[2]))
113 uaargs.uid = field[2] or uaargs.uid
114
115 # Determine the possible groupname
116 # Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname
117 #
118 # By default the system has creation of the matching groups enabled
119 # So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
120 # is used, and we disable the user_group option.
121 #
122 if uaargs.gid:
123 uaargs.groupname = uaargs.gid
124 elif uaargs.user_group is not False:
125 uaargs.groupname = uaargs.LOGIN
126 else:
127 uaargs.groupname = 'users'
128 uaargs.groupid = field[3] or uaargs.groupname
129
130 if uaargs.groupid and uaargs.gid != uaargs.groupid:
131 newgroup = None
132 if not uaargs.groupid.isdigit():
133 # We don't have a group number, so we have to add a name
134 bb.debug(1, "Adding group %s!" % uaargs.groupid)
135 newgroup = "%s %s" % (' --system' if uaargs.system else '', uaargs.groupid)
136 elif uaargs.groupname and not uaargs.groupname.isdigit():
137 # We have a group name and a group number to assign it to
138 bb.debug(1, "Adding group %s (gid %s)!" % (uaargs.groupname, uaargs.groupid))
139 newgroup = "-g %s %s" % (uaargs.groupid, uaargs.groupname)
140 else:
141                    # We want to add a group, but we don't know its name... so we can't add the group...
142                    # We have to assume the group has previously been added or we'll fail on the adduser...
143                    # Note: specifying the actual gid is very rare in OE; usually the group name is specified.
144 bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.groupid))
145
146 uaargs.gid = uaargs.groupid
147 uaargs.user_group = None
148 if newgroup and is_pkg:
149 groupadd = d.getVar("GROUPADD_PARAM:%s" % pkg)
150 if groupadd:
151 # Only add the group if not already specified
152 if not uaargs.groupname in groupadd:
153 d.setVar("GROUPADD_PARAM:%s" % pkg, "%s; %s" % (groupadd, newgroup))
154 else:
155 d.setVar("GROUPADD_PARAM:%s" % pkg, newgroup)
156
157 uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
158 uaargs.home_dir = field[5] or uaargs.home_dir
159 uaargs.shell = field[6] or uaargs.shell
160
161 # Should be an error if a specific option is set...
162 if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
163 handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
164
165 # Reconstruct the args...
166 newparam = ['', ' --defaults'][uaargs.defaults]
167 newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None]
168 newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None]
169 newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None]
170 newparam += ['', ' --expiredate %s' % uaargs.expiredate][uaargs.expiredate != None]
171 newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None]
172 newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None]
173 newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None]
174 newparam += ['', ' --skel %s' % uaargs.skel][uaargs.skel != None]
175 newparam += ['', ' --key %s' % uaargs.key][uaargs.key != None]
176 newparam += ['', ' --no-log-init'][uaargs.no_log_init]
177 newparam += ['', ' --create-home'][uaargs.create_home is True]
178 newparam += ['', ' --no-create-home'][uaargs.create_home is False]
179 newparam += ['', ' --no-user-group'][uaargs.user_group is False]
180 newparam += ['', ' --non-unique'][uaargs.non_unique]
181 if uaargs.password != None:
182 newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
183 newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
184 newparam += ['', ' --system'][uaargs.system]
185 newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
186 newparam += ['', ' --uid %s' % uaargs.uid][uaargs.uid != None]
187 newparam += ['', ' --user-group'][uaargs.user_group is True]
188 newparam += ' %s' % uaargs.LOGIN
189
190 newparams.append(newparam)
191
192 return ";".join(newparams).strip()
193
194 # We parse and rewrite the groupadd components
195 def rewrite_groupadd(params, is_pkg):
196 parser = oe.useradd.build_groupadd_parser()
197
198 newparams = []
199 groups = None
200 for param in oe.useradd.split_commands(params):
201 try:
202 # If we're processing multiple lines, we could have left over values here...
203 gaargs = parser.parse_args(oe.useradd.split_args(param))
204 except Exception as e:
205 bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
206
207 # Read all group files specified in USERADD_GID_TABLES or files/group
208 # Use the standard group layout:
209 # groupname:password:group_id:group_members
210 #
211 # If a field is left blank, the original value will be used. The 'groupname' field
212 # is required.
213 #
214            # Note: similar to the passwd file, the 'password' field is ignored
215 # Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
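            # Example files/group entry in such a table (hypothetical values):
            #   mygroup:*:1200: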
216 if not groups:
217 files, table_var, table_value = get_table_list(d, 'USERADD_GID_TABLES', 'files/group')
218 groups = merge_files(files, 4)
219
220 type = 'system group' if gaargs.system else 'normal group'
221 if gaargs.GROUP not in groups:
222 handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
223 newparams.append(param)
224 continue
225
226 field = groups[gaargs.GROUP]
227
228 if field[2]:
229 if gaargs.gid and (gaargs.gid != field[2]):
230 bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), gaargs.GROUP, gaargs.gid, field[2]))
231 gaargs.gid = field[2]
232
233 if not gaargs.gid or not gaargs.gid.isdigit():
234 handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
235
236 # Reconstruct the args...
237 newparam = ['', ' --force'][gaargs.force]
238 newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None]
239 newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None]
240 newparam += ['', ' --non-unique'][gaargs.non_unique]
241 if gaargs.password != None:
242 newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
243 newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
244 newparam += ['', ' --system'][gaargs.system]
245 newparam += ' %s' % gaargs.GROUP
246
247 newparams.append(newparam)
248
249 return ";".join(newparams).strip()
250
251 # The parsing of the current recipe depends on the content of
252 # the files listed in USERADD_UID/GID_TABLES. We need to tell bitbake
253 # about that explicitly to trigger re-parsing and thus re-execution of
254 # this code when the files change.
255 bbpath = d.getVar('BBPATH')
256 for varname, default in (('USERADD_UID_TABLES', 'files/passwd'),
257 ('USERADD_GID_TABLES', 'files/group')):
258 tables = d.getVar(varname)
259 if not tables:
260 tables = default
261 for conf_file in tables.split():
262 bb.parse.mark_dependency(d, bb.utils.which(bbpath, conf_file))
263
264 # Load and process the users and groups, rewriting the adduser/addgroup params
265 useradd_packages = d.getVar('USERADD_PACKAGES') or ""
266
267 for pkg in useradd_packages.split():
268 # Groupmems doesn't have anything we might want to change, so simply validating
269 # is a bit of a waste -- only process useradd/groupadd
270 useradd_param = d.getVar('USERADD_PARAM:%s' % pkg)
271 if useradd_param:
272 #bb.warn("Before: 'USERADD_PARAM:%s' - '%s'" % (pkg, useradd_param))
273 d.setVar('USERADD_PARAM:%s' % pkg, rewrite_useradd(useradd_param, True))
274 #bb.warn("After: 'USERADD_PARAM:%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM:%s' % pkg)))
275
276 groupadd_param = d.getVar('GROUPADD_PARAM:%s' % pkg)
277 if groupadd_param:
278 #bb.warn("Before: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, groupadd_param))
279 d.setVar('GROUPADD_PARAM:%s' % pkg, rewrite_groupadd(groupadd_param, True))
280 #bb.warn("After: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM:%s' % pkg)))
281
282 # Load and process extra users and groups, rewriting only adduser/addgroup params
283 pkg = d.getVar('PN')
284 extrausers = d.getVar('EXTRA_USERS_PARAMS') or ""
285
286 #bb.warn("Before: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
287 new_extrausers = []
288 for cmd in oe.useradd.split_commands(extrausers):
289 if re.match('''useradd (.*)''', cmd):
290 useradd_param = re.match('''useradd (.*)''', cmd).group(1)
291 useradd_param = rewrite_useradd(useradd_param, False)
292 cmd = 'useradd %s' % useradd_param
293 elif re.match('''groupadd (.*)''', cmd):
294 groupadd_param = re.match('''groupadd (.*)''', cmd).group(1)
295 groupadd_param = rewrite_groupadd(groupadd_param, False)
296 cmd = 'groupadd %s' % groupadd_param
297
298 new_extrausers.append(cmd)
299
300 new_extrausers.append('')
301 d.setVar('EXTRA_USERS_PARAMS', ';'.join(new_extrausers))
302 #bb.warn("After: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
303
304
305python __anonymous() {
306 if not bb.data.inherits_class('nativesdk', d) \
307 and not bb.data.inherits_class('native', d):
308 try:
309 update_useradd_static_config(d)
310 except NotImplementedError as f:
311 bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN'), f))
312 raise bb.parse.SkipRecipe(f)
313}
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
deleted file mode 100644
index 16a65ac323..0000000000
--- a/meta/classes/useradd.bbclass
+++ /dev/null
@@ -1,290 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit useradd_base
8
9# base-passwd-cross provides the default passwd and group files in the
10# target sysroot, and shadow -native and -sysroot provide the utilities
11# and support files needed to add and modify user and group accounts
12DEPENDS:append:class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
13PACKAGE_WRITE_DEPS += "shadow-native"
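#
# A typical recipe usage sketch (the user and group names are hypothetical):
#   inherit useradd
#   USERADD_PACKAGES = "${PN}"
#   GROUPADD_PARAM:${PN} = "--system mygroup"
#   USERADD_PARAM:${PN} = "--system -g mygroup --shell /sbin/nologin myuser"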
14
15# This preinstall function can be run in four different contexts:
16#
17# a) Before do_install
18# b) At do_populate_sysroot_setscene when installing from sstate packages
19# c) As the preinst script in the target package at do_rootfs time
20# d) As the preinst script in the target package on device as a package upgrade
21#
22useradd_preinst () {
23OPT=""
24SYSROOT=""
25
26if test "x$D" != "x"; then
27 # Installing into a sysroot
28 SYSROOT="$D"
29 OPT="--root $D"
30
31	# Make sure login.defs is there; this is needed to make the debian package
32	# backend work correctly while doing rootfs.
33	# The problem here is that if /etc/login.defs is treated as a config file for
34	# the shadow package, then while performing preinsts for packages that depend on
35	# shadow, there might only be /etc/login.defs.dpkg-new in the root filesystem.
36 if [ ! -e $D${sysconfdir}/login.defs -a -e $D${sysconfdir}/login.defs.dpkg-new ]; then
37 cp $D${sysconfdir}/login.defs.dpkg-new $D${sysconfdir}/login.defs
38 fi
39
40 # user/group lookups should match useradd/groupadd --root
41 export PSEUDO_PASSWD="$SYSROOT"
42fi
43
44# If we're not doing a special SSTATE/SYSROOT install
45# then set the values, otherwise use the environment
46if test "x$UA_SYSROOT" = "x"; then
47 # Installing onto a target
48 # Add groups and users defined only for this package
49 GROUPADD_PARAM="${GROUPADD_PARAM}"
50 USERADD_PARAM="${USERADD_PARAM}"
51 GROUPMEMS_PARAM="${GROUPMEMS_PARAM}"
52fi
53
54# Perform group additions first, since user additions may depend
55# on these groups existing
56if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
57 echo "Running groupadd commands..."
58 # Invoke multiple instances of groupadd for parameter lists
59 # separated by ';'
60 opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
61 remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
62 while test "x$opts" != "x"; do
63 perform_groupadd "$SYSROOT" "$OPT $opts"
64 if test "x$opts" = "x$remaining"; then
65 break
66 fi
67 opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
68 remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
69 done
70fi
71
72if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
73 echo "Running useradd commands..."
74 # Invoke multiple instances of useradd for parameter lists
75 # separated by ';'
76 opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
77 remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
78 while test "x$opts" != "x"; do
79 perform_useradd "$SYSROOT" "$OPT $opts"
80 if test "x$opts" = "x$remaining"; then
81 break
82 fi
83 opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
84 remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
85 done
86fi
87
88if test "x`echo $GROUPMEMS_PARAM | tr -d '[:space:]'`" != "x"; then
89 echo "Running groupmems commands..."
90 # Invoke multiple instances of groupmems for parameter lists
91 # separated by ';'
92 opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
93 remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
94 while test "x$opts" != "x"; do
95 perform_groupmems "$SYSROOT" "$OPT $opts"
96 if test "x$opts" = "x$remaining"; then
97 break
98 fi
99 opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
100 remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
101 done
102fi
103}
104
105useradd_sysroot () {
106 user_group_groupmems_add_sysroot user
107}
108
109groupadd_sysroot () {
110 user_group_groupmems_add_sysroot group
111}
112
113groupmemsadd_sysroot () {
114 user_group_groupmems_add_sysroot groupmems
115}
116
117user_group_groupmems_add_sysroot () {
118	# Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
119	# at this point, so we're explicit about the environment to let pseudo load if
120	# it isn't already present.
121 # PSEUDO_SYSROOT can contain references to the build architecture and COMPONENT_DIR
122 # so needs the STAGING_FIXME below
123 export PSEUDO="${FAKEROOTENV} ${PSEUDO_SYSROOT}${bindir_native}/pseudo"
124
125 # Explicitly set $D since it isn't set to anything
126 # before do_prepare_recipe_sysroot
127 D=${STAGING_DIR_TARGET}
128
129	# base-passwd's postinst may not have run yet, in which case we'll get called again later; just exit.
130 # Beware that in some cases we might see the fake pseudo passwd here, in which case we also must
131 # exit.
132 if [ ! -f $D${sysconfdir}/passwd ] ||
133 grep -q this-is-the-pseudo-passwd $D${sysconfdir}/passwd; then
134 exit 0
135 fi
136
137	# It is also possible we're in a recipe which doesn't have useradd dependencies, in which case the
138	# useradd/groupadd tools are unavailable. If there is no dependency, we assume we don't want to
139	# create users in the sysroot.
140	if ! command -v useradd > /dev/null; then
141 bbwarn "command useradd not found!"
142 exit 0
143 fi
144
145 # Add groups and users defined for all recipe packages
146 if test "$1" = "group"; then
147 GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
148 elif test "$1" = "user"; then
149 USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
150 elif test "$1" = "groupmems"; then
151 GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
152 elif test "x$1" = "x"; then
153 bbwarn "missing type of passwd db action"
154 fi
155
156 # Tell the system to use the environment vars
157 UA_SYSROOT=1
158
159 useradd_preinst
160}
161
162# The export of PSEUDO in user_group_groupmems_add_sysroot() above contains references to
163# ${PSEUDO_SYSROOT} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
164# shell functions use ${LOGFIFO}. These need to be handled when restoring
165# postinst-useradd-${PN} from the sstate cache.
166EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO"
167
168python useradd_sysroot_sstate () {
169 for type, sort_prefix in [("group", "01"), ("user", "02"), ("groupmems", "03")]:
170 scriptfile = None
171 task = d.getVar("BB_CURRENTTASK")
172 if task == "package_setscene":
173 bb.build.exec_func(type + "add_sysroot", d)
174 elif task == "prepare_recipe_sysroot":
175 # Used to update this recipe's own sysroot so the user/groups are available to do_install
176
177            # If do_populate_sysroot is also triggered and we wrote the file here too, there would be
178            # overlapping files. See usergrouptests.UserGroupTests.test_add_task_between_p_sysroot_and_package
179 scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}-recipedebug")
180
181 bb.build.exec_func(type + "add_sysroot", d)
182 elif task == "populate_sysroot":
183 # Used when installed in dependent task sysroots
184 scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}")
185
186 if scriptfile:
187 bb.utils.mkdirhier(os.path.dirname(scriptfile))
188 with open(scriptfile, 'w') as script:
189 script.write("#!/bin/sh -e\n")
190 bb.data.emit_func(type + "add_sysroot", script, d)
191 script.write(type + "add_sysroot\n")
192 os.chmod(scriptfile, 0o755)
193}
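
# For reference, each script emitted above is a self-contained shell file of
# roughly this shape (a sketch; the exact body depends on the recipe):
#
#   #!/bin/sh -e
#   <type>add_sysroot () {
#       ... function body emitted by bb.data.emit_func() ...
#   }
#   <type>add_sysroot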
194
195do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
196SYSROOTFUNC:class-target = "useradd_sysroot_sstate"
197SYSROOTFUNC = ""
198
199SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
200
201SSTATEPREINSTFUNCS:append:class-target = " useradd_sysroot_sstate"
202
203USERADD_DEPENDS ??= ""
204DEPENDS += "${USERADD_DEPENDS}"
205do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
206do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
207USERADDSETSCENEDEPS:class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene ${@' '.join(['%s:do_populate_sysroot_setscene' % pkg for pkg in d.getVar("USERADD_DEPENDS").split()])}"
208USERADDSETSCENEDEPS = ""
209
210# Recipe parse-time sanity checks
211def update_useradd_after_parse(d):
212 useradd_packages = d.getVar('USERADD_PACKAGES')
213
214 if not useradd_packages:
215 bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
216
217 for pkg in useradd_packages.split():
218 d.appendVarFlag("do_populate_sysroot", "vardeps", " USERADD_PARAM:%s GROUPADD_PARAM:%s GROUPMEMS_PARAM:%s" % (pkg, pkg, pkg))
219 if not d.getVar('USERADD_PARAM:%s' % pkg) and not d.getVar('GROUPADD_PARAM:%s' % pkg) and not d.getVar('GROUPMEMS_PARAM:%s' % pkg):
220 bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
221
222python __anonymous() {
223 if not bb.data.inherits_class('nativesdk', d) \
224 and not bb.data.inherits_class('native', d):
225 update_useradd_after_parse(d)
226}
227
228# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the
229# [group|user]add parameters for all USERADD_PACKAGES in this recipe
230def get_all_cmd_params(d, cmd_type):
231 import string
232
233 param_type = cmd_type.upper() + "_PARAM:%s"
234 params = []
235
236 useradd_packages = d.getVar('USERADD_PACKAGES') or ""
237 for pkg in useradd_packages.split():
238 param = d.getVar(param_type % pkg)
239 if param:
240 params.append(param.rstrip(" ;"))
241
242 return "; ".join(params)
243
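# For example (hypothetical values), with USERADD_PACKAGES = "pkgA pkgB",
# GROUPADD_PARAM:pkgA = "-r groupa;" and GROUPADD_PARAM:pkgB = "-r groupb",
# get_all_cmd_params(d, 'groupadd') returns "-r groupa; -r groupb".
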
244# Adds the preinst script into generated packages
245fakeroot python populate_packages:prepend () {
246 def update_useradd_package(pkg):
247 bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
248
249        """
250        The useradd preinst is appended to pkg_preinst here because
251        pkg_preinst may be required to execute on the target. Not doing so
252        could cause the useradd preinst to be invoked twice, causing unwanted warnings.
253        """
254 preinst = d.getVar('pkg_preinst:%s' % pkg) or d.getVar('pkg_preinst')
255 if not preinst:
256 preinst = '#!/bin/sh\n'
257 preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
258 preinst += 'bbwarn () {\n\techo "WARNING: $*"\n}\n'
259 preinst += 'bbfatal () {\n\techo "ERROR: $*"\n\texit 1\n}\n'
260 preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd')
261 preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
262 preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
263 preinst += d.getVar('useradd_preinst')
264 # Expand out the *_PARAM variables to the package specific versions
265 for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
266 val = d.getVar(rep + ":" + pkg) or ""
267 preinst = preinst.replace("${" + rep + "}", val)
268 d.setVar('pkg_preinst:%s' % pkg, preinst)
269
270 # RDEPENDS setup
271 rdepends = d.getVar("RDEPENDS:%s" % pkg) or ""
272 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
273 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
274 # base-files is where the default /etc/skel is packaged
275 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
276 d.setVar("RDEPENDS:%s" % pkg, rdepends)
277
278 # Add the user/group preinstall scripts and RDEPENDS requirements
279 # to packages specified by USERADD_PACKAGES
280 if not bb.data.inherits_class('nativesdk', d) \
281 and not bb.data.inherits_class('native', d):
282 useradd_packages = d.getVar('USERADD_PACKAGES') or ""
283 for pkg in useradd_packages.split():
284 update_useradd_package(pkg)
285}
286
287# Use the following to extend the useradd with custom functions
288USERADDEXTENSION ?= ""
289
290inherit_defer ${USERADDEXTENSION}
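
# For example, a distro that needs deterministic UIDs/GIDs could set
# USERADDEXTENSION = "useradd-staticids" (e.g. in its distro configuration),
# pulling in useradd-staticids.bbclass via the inherit_defer above.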
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
deleted file mode 100644
index 5e1c699118..0000000000
--- a/meta/classes/useradd_base.bbclass
+++ /dev/null
@@ -1,171 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This bbclass provides basic functionality for user/group settings.
8# This bbclass is intended to be inherited by useradd.bbclass and
9# extrausers.bbclass.
10
11# The following functions all follow the same pattern:
12# *) Perform necessary checks before invoking the actual command
13# *) Invoke the actual command with flock
14# *) Error out if an error occurs.
15
16# Note that before invoking these functions, make sure the global variable
17# PSEUDO is set up correctly.
18
19perform_groupadd () {
20 local rootdir="$1"
21 local opts="$2"
22 bbnote "${PN}: Performing groupadd with [$opts]"
23 local groupname=`echo "$opts" | awk '{ print $NF }'`
24 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
25 if test "x$group_exists" = "x"; then
26 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupadd \$opts\" || true
27 group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
28 if test "x$group_exists" = "x"; then
29 bbfatal "${PN}: groupadd command did not succeed."
30 fi
31 else
32 bbnote "${PN}: group $groupname already exists, not re-creating it"
33 fi
34}
35
36perform_useradd () {
37 local rootdir="$1"
38 local opts="$2"
39 bbnote "${PN}: Performing useradd with [$opts]"
40 local username=`echo "$opts" | awk '{ print $NF }'`
41 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
42 if test "x$user_exists" = "x"; then
43 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO useradd \$opts\" || true
44 user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
45 if test "x$user_exists" = "x"; then
46 bbfatal "${PN}: useradd command did not succeed."
47 fi
48 else
49 bbnote "${PN}: user $username already exists, not re-creating it"
50 fi
51}
52
53perform_groupmems () {
54 local rootdir="$1"
55 local opts="$2"
56 bbnote "${PN}: Performing groupmems with [$opts]"
57 local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
58 local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
59 bbnote "${PN}: Running groupmems command with group $groupname and user $username"
60 local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
61 if test "x$mem_exists" = "x"; then
62 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmems \$opts\" || true
63 mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
64 if test "x$mem_exists" = "x"; then
65 bbfatal "${PN}: groupmems command did not succeed."
66 fi
67 else
68 bbnote "${PN}: group $groupname already contains $username, not re-adding it"
69 fi
70}
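
# As a worked example (hypothetical values): with opts "-a user1 -g groupa",
# the awk extraction above yields username=user1 and groupname=groupa, and
# the grep checks whether user1 already appears in groupa's member list in
# $rootdir/etc/group before groupmems is run.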
71
72perform_groupdel () {
73 local rootdir="$1"
74 local opts="$2"
75 bbnote "${PN}: Performing groupdel with [$opts]"
76 local groupname=`echo "$opts" | awk '{ print $NF }'`
77 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
78
79 if test "x$group_exists" != "x"; then
80 local awk_input='BEGIN {FS=":"}; $1=="'$groupname'" { print $3 }'
81 local groupid=`echo "$awk_input" | awk -f- $rootdir/etc/group`
82 local awk_check_users='BEGIN {FS=":"}; $4=="'$groupid'" {print $1}'
83 local other_users=`echo "$awk_check_users" | awk -f- $rootdir/etc/passwd`
84
85 if test "x$other_users" = "x"; then
86 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
87 group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
88 if test "x$group_exists" != "x"; then
89 bbfatal "${PN}: groupdel command did not succeed."
90 fi
91 else
92 bbnote "${PN}: '$groupname' is primary group for users '$other_users', not removing it"
93 fi
94 else
95 bbnote "${PN}: group $groupname doesn't exist, not removing it"
96 fi
97}
98
99perform_userdel () {
100 local rootdir="$1"
101 local opts="$2"
102 bbnote "${PN}: Performing userdel with [$opts]"
103 local username=`echo "$opts" | awk '{ print $NF }'`
104 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
105 if test "x$user_exists" != "x"; then
106 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO userdel \$opts\" || true
107 user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
108 if test "x$user_exists" != "x"; then
109 bbfatal "${PN}: userdel command did not succeed."
110 fi
111 else
112 bbnote "${PN}: user $username doesn't exist, not removing it"
113 fi
114}
115
116perform_groupmod () {
117	# Other than the return value of groupmod, there's no simple way to judge whether the command
118	# succeeds, so we temporarily disable the -e option
119 set +e
120 local rootdir="$1"
121 local opts="$2"
122 bbnote "${PN}: Performing groupmod with [$opts]"
123 local groupname=`echo "$opts" | awk '{ print $NF }'`
124 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
125 if test "x$group_exists" != "x"; then
126 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmod \$opts\"
127 if test $? != 0; then
128 bbwarn "${PN}: groupmod command did not succeed."
129 fi
130 else
131 bbwarn "${PN}: group $groupname doesn't exist, unable to modify it"
132 fi
133 set -e
134}
135
136perform_usermod () {
137	# For the same reason as groupmod, temporarily disable the -e option
138 set +e
139 local rootdir="$1"
140 local opts="$2"
141 bbnote "${PN}: Performing usermod with [$opts]"
142 local username=`echo "$opts" | awk '{ print $NF }'`
143 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
144 if test "x$user_exists" != "x"; then
145 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO usermod \$opts\"
146 if test $? != 0; then
147 bbfatal "${PN}: usermod command did not succeed."
148 fi
149 else
150 bbwarn "${PN}: user $username doesn't exist, unable to modify it"
151 fi
152 set -e
153}
154
155perform_passwd_expire () {
156 local rootdir="$1"
157 local opts="$2"
158 bbnote "${PN}: Performing equivalent of passwd --expire with [$opts]"
159	# Directly set sp_lstchg to 0 rather than using the passwd command: only root can run that
160 local username=`echo "$opts" | awk '{ print $NF }'`
161 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
162 if test "x$user_exists" != "x"; then
163 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO sed --follow-symlinks -i \''s/^\('$username':[^:]*\):[^:]*:/\1:0:/'\' $rootdir/etc/shadow \" || true
164 local passwd_lastchanged="`grep "^$username:" $rootdir/etc/shadow | cut -d: -f3`"
165 if test "x$passwd_lastchanged" != "x0"; then
166 bbfatal "${PN}: passwd --expire operation did not succeed."
167 fi
168 else
169 bbnote "${PN}: user $username doesn't exist, not expiring its password"
170 fi
171}
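
# As a sketch of the effect (hypothetical shadow entry), the sed above turns
#   user1:!:19000:0:99999:7:::
# into
#   user1:!:0:0:99999:7:::
# i.e. field 3 (sp_lstchg, days since the last password change) becomes 0,
# forcing a password change at the next login.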
diff --git a/meta/classes/vex.bbclass b/meta/classes/vex.bbclass
deleted file mode 100644
index 402d8e0d96..0000000000
--- a/meta/classes/vex.bbclass
+++ /dev/null
@@ -1,303 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is used to generate metadata needed by external
8# tools to check for vulnerabilities, for example CVEs.
9#
10# In order to use this class, just inherit it from the
11# local.conf file; it will then add the generate_vex task for
12# every recipe. If an image is built, it will generate a report
13# in DEPLOY_DIR_IMAGE for all the packages used, and it will
14# also generate a file for every recipe used in the build.
15#
16# Variables use the CVE_CHECK prefix to keep compatibility with
17# the cve-check class.
18#
19# Example:
20# bitbake -c generate_vex openssl
21# bitbake core-image-sato
22# bitbake -k -c generate_vex universe
23#
24# The product name that the CVE database uses defaults to BPN, but may need to
25# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
26CVE_PRODUCT ??= "${BPN}"
27CVE_VERSION ??= "${PV}"
28
29CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
30
31CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json"
32CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt"
33
34CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
35CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
36CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.json"
37
38# Skip CVE Check for packages (PN)
39CVE_CHECK_SKIP_RECIPE ?= ""
40
41# Replace NVD DB check status for a given CVE. Each of CVE has to be mentioned
42# separately with optional detail and description for this status.
43#
44# CVE_STATUS[CVE-1234-0001] = "not-applicable-platform: Issue only applies on Windows"
45# CVE_STATUS[CVE-1234-0002] = "fixed-version: Fixed externally"
46#
47# Setting the same status and reason for multiple CVEs is possible
48# via the CVE_STATUS_GROUPS variable.
49#
50# CVE_STATUS_GROUPS = "CVE_STATUS_WIN CVE_STATUS_PATCHED"
51#
52# CVE_STATUS_WIN = "CVE-1234-0001 CVE-1234-0003"
53# CVE_STATUS_WIN[status] = "not-applicable-platform: Issue only applies on Windows"
54# CVE_STATUS_PATCHED = "CVE-1234-0002 CVE-1234-0004"
55# CVE_STATUS_PATCHED[status] = "fixed-version: Fixed externally"
56#
57# All possible CVE statuses can be found in cve-check-map.conf, e.g.:
58# CVE_CHECK_STATUSMAP[not-applicable-platform] = "Ignored"
59# CVE_CHECK_STATUSMAP[fixed-version] = "Patched"
60#
61# CVE_CHECK_IGNORE is deprecated and CVE_STATUS should be used instead.
62# Keep CVE_CHECK_IGNORE until other layers migrate to the new variables.
63CVE_CHECK_IGNORE ?= ""
64
65# Layers to be excluded
66CVE_CHECK_LAYER_EXCLUDELIST ??= ""
67
68# Layers to be included
69CVE_CHECK_LAYER_INCLUDELIST ??= ""
70
71
72# Set to "alphabetical" for versions that use a single alphabetical character as the release increment
73CVE_VERSION_SUFFIX ??= ""
74
75python () {
76 if bb.data.inherits_class("cve-check", d):
77 raise bb.parse.SkipRecipe("Skipping recipe: found incompatible combination of cve-check and vex enabled at the same time.")
78
79 from oe.cve_check import extend_cve_status
80 extend_cve_status(d)
81}
82
83def generate_json_report(d, out_path, link_path):
84 if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
85 import json
86 from oe.cve_check import cve_check_merge_jsons, update_symlinks
87
88 bb.note("Generating JSON CVE summary")
89 index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
90 summary = {"version":"1", "package": []}
91 with open(index_file) as f:
92 filename = f.readline()
93 while filename:
94 with open(filename.rstrip()) as j:
95 data = json.load(j)
96 cve_check_merge_jsons(summary, data)
97 filename = f.readline()
98
99 summary["package"].sort(key=lambda d: d['name'])
100
101 with open(out_path, "w") as f:
102 json.dump(summary, f, indent=2)
103
104 update_symlinks(out_path, link_path)
105
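# The index file consumed above is a plain list of per-recipe fragment paths,
# one per line, e.g. (illustrative):
#
#   ${LOG_DIR}/cve/openssl_cve.json
#   ${LOG_DIR}/cve/busybox_cve.json
#
# Each fragment is merged into the single summary via cve_check_merge_jsons().
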
106python vex_save_summary_handler () {
107 import shutil
108 import datetime
109 from oe.cve_check import update_symlinks
110
111 cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
112
113 bb.utils.mkdirhier(cvelogpath)
114 timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
115
116 json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
117 json_summary_name = os.path.join(cvelogpath, "cve-summary-%s.json" % (timestamp))
118 generate_json_report(d, json_summary_name, json_summary_link_name)
119 bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
120}
121
122addhandler vex_save_summary_handler
123vex_save_summary_handler[eventmask] = "bb.event.BuildCompleted"
124
125python do_generate_vex () {
126 """
127 Generate metadata needed for vulnerability checking for
128 the current recipe
129 """
130 from oe.cve_check import get_patched_cves
131
132 try:
133 patched_cves = get_patched_cves(d)
134 cves_status = []
135 products = d.getVar("CVE_PRODUCT").split()
136 for product in products:
137 if ":" in product:
138 _, product = product.split(":", 1)
139 cves_status.append([product, False])
140
141 except FileNotFoundError:
142 bb.fatal("Failure in searching patches")
143
144 cve_write_data_json(d, patched_cves, cves_status)
145}
146
147addtask generate_vex before do_build
148
149python vex_cleanup () {
150 """
151 Delete the file used to gather all the CVE information.
152 """
153 bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH"))
154}
155
156addhandler vex_cleanup
157vex_cleanup[eventmask] = "bb.event.BuildCompleted"
158
159python vex_write_rootfs_manifest () {
160 """
161 Create VEX/CVE manifest when building an image
162 """
163
164 import json
165 from oe.rootfs import image_list_installed_packages
166 from oe.cve_check import cve_check_merge_jsons, update_symlinks
167
168 deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
169 if os.path.exists(deploy_file_json):
170 bb.utils.remove(deploy_file_json)
171
172    # Create a list of relevant recipes
173    recipes = set()
174 for pkg in list(image_list_installed_packages(d)):
175 pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
176 'runtime-reverse', pkg)
177 pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
178        recipes.add(pkg_data["PN"])
179
180 bb.note("Writing rootfs VEX manifest")
181 deploy_dir = d.getVar("IMGDEPLOYDIR")
182 link_name = d.getVar("IMAGE_LINK_NAME")
183
184 json_data = {"version":"1", "package": []}
185 text_data = ""
186
187 save_pn = d.getVar("PN")
188
189    for pkg in recipes:
190        # To be able to use the CVE_CHECK_RECIPE_FILE_JSON variable we have to
191        # re-evaluate it with each recipe's PN set in turn.
192 d.setVar("PN", pkg)
193
194 pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
195 if os.path.exists(pkgfilepath):
196 with open(pkgfilepath) as j:
197 data = json.load(j)
198 cve_check_merge_jsons(json_data, data)
199 else:
200            bb.warn("Missing CVE file for %s" % pkg)
201
202 d.setVar("PN", save_pn)
203
204 link_path = os.path.join(deploy_dir, "%s.json" % link_name)
205 manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
206
207 with open(manifest_name, "w") as f:
208 json.dump(json_data, f, indent=2)
209
210 update_symlinks(manifest_name, link_path)
211 bb.plain("Image VEX JSON report stored in: %s" % manifest_name)
212}
213
214ROOTFS_POSTPROCESS_COMMAND:prepend = "vex_write_rootfs_manifest; "
215do_rootfs[recrdeptask] += "do_generate_vex "
216do_populate_sdk[recrdeptask] += "do_generate_vex "
217
218def cve_write_data_json(d, cve_data, cve_status):
219 """
220 Prepare CVE data for the JSON format, then write it.
221 Done for each recipe.
222 """
223
224 from oe.cve_check import get_cpe_ids
225 import json
226
227 output = {"version":"1", "package": []}
228 nvd_link = "https://nvd.nist.gov/vuln/detail/"
229
230 fdir_name = d.getVar("FILE_DIRNAME")
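    # FILE_DIRNAME looks like .../<layer>/<recipes-group>/<recipe-dir>, so the
    # third path component from the end is the layer name (this assumes the
    # conventional layer directory layout)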
231 layer = fdir_name.split("/")[-3]
232
233 include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
234 exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
235
236 if exclude_layers and layer in exclude_layers:
237 return
238
239 if include_layers and layer not in include_layers:
240 return
241
242 product_data = []
243 for s in cve_status:
244 p = {"product": s[0], "cvesInRecord": "Yes"}
245        if not s[1]:
246 p["cvesInRecord"] = "No"
247 product_data.append(p)
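    # Deduplicate entries by product name (the last occurrence wins)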
248 product_data = list({p['product']:p for p in product_data}.values())
249
250 package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
251 cpes = get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
252 package_data = {
253 "name" : d.getVar("PN"),
254 "layer" : layer,
255 "version" : package_version,
256 "products": product_data,
257 "cpes": cpes
258 }
259
260 cve_list = []
261
262 for cve in sorted(cve_data):
263 issue_link = "%s%s" % (nvd_link, cve)
264
265 cve_item = {
266 "id" : cve,
267 "status" : cve_data[cve]["abbrev-status"],
268 "link": issue_link,
269 }
270 if 'NVD-summary' in cve_data[cve]:
271 cve_item["summary"] = cve_data[cve]["NVD-summary"]
272 cve_item["scorev2"] = cve_data[cve]["NVD-scorev2"]
273 cve_item["scorev3"] = cve_data[cve]["NVD-scorev3"]
274 cve_item["scorev4"] = cve_data[cve]["NVD-scorev4"]
275 cve_item["vector"] = cve_data[cve]["NVD-vector"]
276 cve_item["vectorString"] = cve_data[cve]["NVD-vectorString"]
277 if 'status' in cve_data[cve]:
278 cve_item["detail"] = cve_data[cve]["status"]
279 if 'justification' in cve_data[cve]:
280 cve_item["description"] = cve_data[cve]["justification"]
281 if 'resource' in cve_data[cve]:
282 cve_item["patch-file"] = cve_data[cve]["resource"]
283 cve_list.append(cve_item)
284
285 package_data["issue"] = cve_list
286 output["package"].append(package_data)
287
288 deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
289
290 write_string = json.dumps(output, indent=2)
291
292 cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
293 index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
294 bb.utils.mkdirhier(cvelogpath)
295 bb.utils.mkdirhier(os.path.dirname(deploy_file))
296 fragment_file = os.path.basename(deploy_file)
297 fragment_path = os.path.join(cvelogpath, fragment_file)
298 with open(fragment_path, "w") as f:
299 f.write(write_string)
300 with open(deploy_file, "w") as f:
301 f.write(write_string)
302 with open(index_path, "a+") as f:
303 f.write("%s\n" % fragment_path)
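
# Each per-recipe file written above contains a single package entry of
# roughly this shape (a sketch; values are illustrative, not real output):
#
#   {
#     "version": "1",
#     "package": [{
#       "name": "<PN>",
#       "layer": "<layer>",
#       "version": "<EXTENDPE><PV>",
#       "products": [{"product": "<CVE_PRODUCT>", "cvesInRecord": "No"}],
#       "cpes": ["cpe:2.3:..."],
#       "issue": [{"id": "CVE-YYYY-NNNN", "status": "Patched",
#                  "link": "https://nvd.nist.gov/vuln/detail/CVE-YYYY-NNNN"}]
#     }]
#   }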