Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 63
-rw-r--r--  meta/classes/archiver.bbclass | 108
-rw-r--r--  meta/classes/autotools-brokensep.bbclass | 5
-rw-r--r--  meta/classes/autotools.bbclass | 250
-rw-r--r--  meta/classes/baremetal-image.bbclass | 98
-rw-r--r--  meta/classes/base.bbclass | 735
-rw-r--r--  meta/classes/bash-completion.bbclass | 7
-rw-r--r--  meta/classes/bin_package.bbclass | 39
-rw-r--r--  meta/classes/binconfig-disabled.bbclass | 30
-rw-r--r--  meta/classes/binconfig.bbclass | 54
-rw-r--r--  meta/classes/blacklist.bbclass | 20
-rw-r--r--  meta/classes/buildhistory.bbclass | 158
-rw-r--r--  meta/classes/buildstats-summary.bbclass | 6
-rw-r--r--  meta/classes/buildstats.bbclass | 251
-rw-r--r--  meta/classes/ccache.bbclass | 17
-rw-r--r--  meta/classes/ccmake.bbclass | 6
-rw-r--r--  meta/classes/chrpath.bbclass | 8
-rw-r--r--  meta/classes/clutter.bbclass | 18
-rw-r--r--  meta/classes/cmake.bbclass | 217
-rw-r--r--  meta/classes/cml1.bbclass | 101
-rw-r--r--  meta/classes/compress_doc.bbclass | 263
-rw-r--r--  meta/classes/copyleft_compliance.bbclass | 6
-rw-r--r--  meta/classes/copyleft_filter.bbclass | 8
-rw-r--r--  meta/classes/core-image.bbclass | 75
-rw-r--r--  meta/classes/cpan-base.bbclass | 18
-rw-r--r--  meta/classes/cpan.bbclass | 65
-rw-r--r--  meta/classes/cpan_build.bbclass | 41
-rw-r--r--  meta/classes/create-spdx-2.2.bbclass | 1158
-rw-r--r--  meta/classes/create-spdx.bbclass | 8
-rw-r--r--  meta/classes/cross-canadian.bbclass | 194
-rw-r--r--  meta/classes/cross.bbclass | 99
-rw-r--r--  meta/classes/crosssdk.bbclass | 51
-rw-r--r--  meta/classes/cve-check.bbclass | 556
-rw-r--r--  meta/classes/debian.bbclass | 146
-rw-r--r--  meta/classes/deploy.bbclass | 12
-rw-r--r--  meta/classes/devicetree.bbclass | 148
-rw-r--r--  meta/classes/devshell.bbclass | 155
-rw-r--r--  meta/classes/devtool-source.bbclass | 9
-rw-r--r--  meta/classes/devupstream.bbclass | 48
-rw-r--r--  meta/classes/distro_features_check.bbclass | 7
-rw-r--r--  meta/classes/distrooverrides.bbclass | 12
-rw-r--r--  meta/classes/distutils-common-base.bbclass | 25
-rw-r--r--  meta/classes/distutils3-base.bbclass | 6
-rw-r--r--  meta/classes/distutils3.bbclass | 67
-rw-r--r--  meta/classes/dos2unix.bbclass | 14
-rw-r--r--  meta/classes/externalsrc.bbclass | 91
-rw-r--r--  meta/classes/extrausers.bbclass | 10
-rw-r--r--  meta/classes/features_check.bbclass | 57
-rw-r--r--  meta/classes/fontcache.bbclass | 57
-rw-r--r--  meta/classes/fs-uuid.bbclass | 24
-rw-r--r--  meta/classes/gconf.bbclass | 71
-rw-r--r--  meta/classes/gettext.bbclass | 22
-rw-r--r--  meta/classes/gio-module-cache.bbclass | 38
-rw-r--r--  meta/classes/glide.bbclass | 9
-rw-r--r--  meta/classes/gnomebase.bbclass | 30
-rw-r--r--  meta/classes/go-mod.bbclass | 20
-rw-r--r--  meta/classes/go-ptest.bbclass | 54
-rw-r--r--  meta/classes/go-vendor.bbclass | 211
-rw-r--r--  meta/classes/go.bbclass | 156
-rw-r--r--  meta/classes/goarch.bbclass | 116
-rw-r--r--  meta/classes/gobject-introspection-data.bbclass | 7
-rw-r--r--  meta/classes/gobject-introspection.bbclass | 53
-rw-r--r--  meta/classes/godep.bbclass | 8
-rw-r--r--  meta/classes/grub-efi-cfg.bbclass | 123
-rw-r--r--  meta/classes/grub-efi.bbclass | 8
-rw-r--r--  meta/classes/gsettings.bbclass | 42
-rw-r--r--  meta/classes/gtk-doc.bbclass | 83
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 84
-rw-r--r--  meta/classes/gtk-immodules-cache.bbclass | 76
-rw-r--r--  meta/classes/icecc.bbclass | 140
-rw-r--r--  meta/classes/image-artifact-names.bbclass | 15
-rw-r--r--  meta/classes/image-buildinfo.bbclass | 54
-rw-r--r--  meta/classes/image-combined-dbg.bbclass | 9
-rw-r--r--  meta/classes/image-container.bbclass | 21
-rw-r--r--  meta/classes/image-live.bbclass | 264
-rw-r--r--  meta/classes/image-mklibs.bbclass | 56
-rw-r--r--  meta/classes/image-postinst-intercepts.bbclass | 23
-rw-r--r--  meta/classes/image-prelink.bbclass | 81
-rw-r--r--  meta/classes/image.bbclass | 674
-rw-r--r--  meta/classes/image_types.bbclass | 324
-rw-r--r--  meta/classes/image_types_wic.bbclass | 157
-rw-r--r--  meta/classes/insane.bbclass | 1403
-rw-r--r--  meta/classes/kernel-arch.bbclass | 68
-rw-r--r--  meta/classes/kernel-artifact-names.bbclass | 26
-rw-r--r--  meta/classes/kernel-devicetree.bbclass | 102
-rw-r--r--  meta/classes/kernel-fitimage.bbclass | 772
-rw-r--r--  meta/classes/kernel-grub.bbclass | 105
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 179
-rw-r--r--  meta/classes/kernel-uboot.bbclass | 30
-rw-r--r--  meta/classes/kernel-uimage.bbclass | 35
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 670
-rw-r--r--  meta/classes/kernel.bbclass | 782
-rw-r--r--  meta/classes/kernelsrc.bbclass | 10
-rw-r--r--  meta/classes/lib_package.bbclass | 7
-rw-r--r--  meta/classes/libc-package.bbclass | 384
-rw-r--r--  meta/classes/license.bbclass | 436
-rw-r--r--  meta/classes/license_image.bbclass | 269
-rw-r--r--  meta/classes/linux-kernel-base.bbclass | 41
-rw-r--r--  meta/classes/linuxloader.bbclass | 72
-rw-r--r--  meta/classes/live-vm-common.bbclass | 94
-rw-r--r--  meta/classes/logging.bbclass | 101
-rw-r--r--  meta/classes/manpages.bbclass | 44
-rw-r--r--  meta/classes/mcextend.bbclass | 6
-rw-r--r--  meta/classes/meson.bbclass | 189
-rw-r--r--  meta/classes/meta.bbclass | 4
-rw-r--r--  meta/classes/metadata_scm.bbclass | 50
-rw-r--r--  meta/classes/migrate_localcount.bbclass | 6
-rw-r--r--  meta/classes/mime-xdg.bbclass | 74
-rw-r--r--  meta/classes/mime.bbclass | 70
-rw-r--r--  meta/classes/mirrors.bbclass | 76
-rw-r--r--  meta/classes/module-base.bbclass | 21
-rw-r--r--  meta/classes/module.bbclass | 74
-rw-r--r--  meta/classes/multilib.bbclass | 48
-rw-r--r--  meta/classes/multilib_global.bbclass | 93
-rw-r--r--  meta/classes/multilib_header.bbclass | 52
-rw-r--r--  meta/classes/multilib_script.bbclass | 34
-rw-r--r--  meta/classes/native.bbclass | 193
-rw-r--r--  meta/classes/nativesdk.bbclass | 115
-rw-r--r--  meta/classes/nopackages.bbclass | 13
-rw-r--r--  meta/classes/npm.bbclass | 318
-rw-r--r--  meta/classes/oelint.bbclass | 6
-rw-r--r--  meta/classes/own-mirrors.bbclass | 33
-rw-r--r--  meta/classes/package.bbclass | 2488
-rw-r--r--  meta/classes/package_deb.bbclass | 324
-rw-r--r--  meta/classes/package_ipk.bbclass | 282
-rw-r--r--  meta/classes/package_pkgdata.bbclass | 167
-rw-r--r--  meta/classes/package_rpm.bbclass | 756
-rw-r--r--  meta/classes/package_tar.bbclass | 71
-rw-r--r--  meta/classes/packagedata.bbclass | 34
-rw-r--r--  meta/classes/packagegroup.bbclass | 61
-rw-r--r--  meta/classes/patch.bbclass | 166
-rw-r--r--  meta/classes/perl-version.bbclass | 66
-rw-r--r--  meta/classes/perlnative.bbclass | 3
-rw-r--r--  meta/classes/pixbufcache.bbclass | 63
-rw-r--r--  meta/classes/pkgconfig.bbclass | 2
-rw-r--r--  meta/classes/populate_sdk.bbclass | 7
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 340
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass | 796
-rw-r--r--  meta/classes/prexport.bbclass | 6
-rw-r--r--  meta/classes/primport.bbclass | 6
-rw-r--r--  meta/classes/ptest-gnome.bbclass | 8
-rw-r--r--  meta/classes/ptest-perl.bbclass | 30
-rw-r--r--  meta/classes/ptest.bbclass | 119
-rw-r--r--  meta/classes/pypi.bbclass | 26
-rw-r--r--  meta/classes/python3-dir.bbclass | 5
-rw-r--r--  meta/classes/python3native.bbclass | 24
-rw-r--r--  meta/classes/python3targetconfig.bbclass | 17
-rw-r--r--  meta/classes/qemu.bbclass | 67
-rw-r--r--  meta/classes/qemuboot.bbclass | 148
-rw-r--r--  meta/classes/recipe_sanity.bbclass | 8
-rw-r--r--  meta/classes/relative_symlinks.bbclass | 6
-rw-r--r--  meta/classes/relocatable.bbclass | 6
-rw-r--r--  meta/classes/remove-libtool.bbclass | 6
-rw-r--r--  meta/classes/report-error.bbclass | 62
-rw-r--r--  meta/classes/reproducible_build.bbclass | 125
-rw-r--r--  meta/classes/reproducible_build_simple.bbclass | 9
-rw-r--r--  meta/classes/rm_work.bbclass | 126
-rw-r--r--  meta/classes/rm_work_and_downloads.bbclass | 5
-rw-r--r--  meta/classes/rootfs-postcommands.bbclass | 375
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 39
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 38
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 39
-rw-r--r--  meta/classes/rootfsdebugfiles.bbclass | 41
-rw-r--r--  meta/classes/sanity.bbclass | 1054
-rw-r--r--  meta/classes/scons.bbclass | 28
-rw-r--r--  meta/classes/setuptools3.bbclass | 4
-rw-r--r--  meta/classes/sign_ipk.bbclass | 6
-rw-r--r--  meta/classes/sign_package_feed.bbclass | 9
-rw-r--r--  meta/classes/sign_rpm.bbclass | 6
-rw-r--r--  meta/classes/siteconfig.bbclass | 6
-rw-r--r--  meta/classes/siteinfo.bbclass | 204
-rw-r--r--  meta/classes/sstate.bbclass | 1225
-rw-r--r--  meta/classes/staging.bbclass | 625
-rw-r--r--  meta/classes/syslinux.bbclass | 194
-rw-r--r--  meta/classes/systemd-boot-cfg.bbclass | 71
-rw-r--r--  meta/classes/systemd-boot.bbclass | 35
-rw-r--r--  meta/classes/systemd.bbclass | 233
-rw-r--r--  meta/classes/terminal.bbclass | 11
-rw-r--r--  meta/classes/testexport.bbclass | 182
-rw-r--r--  meta/classes/testimage.bbclass | 491
-rw-r--r--  meta/classes/testsdk.bbclass | 50
-rw-r--r--  meta/classes/texinfo.bbclass | 18
-rw-r--r--  meta/classes/toaster.bbclass | 10
-rw-r--r--  meta/classes/toolchain-scripts-base.bbclass | 11
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 203
-rw-r--r--  meta/classes/typecheck.bbclass | 6
-rw-r--r--  meta/classes/uboot-config.bbclass | 57
-rw-r--r--  meta/classes/uboot-extlinux-config.bbclass | 158
-rw-r--r--  meta/classes/uboot-sign.bbclass | 132
-rw-r--r--  meta/classes/uninative.bbclass | 171
-rw-r--r--  meta/classes/update-alternatives.bbclass | 327
-rw-r--r--  meta/classes/update-rc.d.bbclass | 123
-rw-r--r--  meta/classes/upstream-version-is-even.bbclass | 5
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 38
-rw-r--r--  meta/classes/useradd.bbclass | 105
-rw-r--r--  meta/classes/useradd_base.bbclass | 8
-rw-r--r--  meta/classes/utility-tasks.bbclass | 53
-rw-r--r--  meta/classes/utils.bbclass | 362
-rw-r--r--  meta/classes/vala.bbclass | 24
-rw-r--r--  meta/classes/waf.bbclass | 76
-rw-r--r--  meta/classes/xmlcatalog.bbclass | 26
-rw-r--r--  meta/classes/yocto-check-layer.bbclass | 22
202 files changed, 2609 insertions, 26482 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
deleted file mode 100644
index 5bd5c44a27..0000000000
--- a/meta/classes/allarch.bbclass
+++ /dev/null
@@ -1,63 +0,0 @@
1#
2# This class is used for architecture independent recipes/data files (usually scripts)
3#
4
5python allarch_package_arch_handler () {
6 if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
7 or bb.data.inherits_class("crosssdk", d):
8 return
9
10 variants = d.getVar("MULTILIB_VARIANTS")
11 if not variants:
12 d.setVar("PACKAGE_ARCH", "all" )
13}
14
15addhandler allarch_package_arch_handler
16allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
17
18python () {
19 # Allow this class to be included but overridden - only set
20 # the values if we're still "all" package arch.
21 if d.getVar("PACKAGE_ARCH") == "all":
22 # No need for virtual/libc or a cross compiler
23 d.setVar("INHIBIT_DEFAULT_DEPS","1")
24
25 # Set these to a common set of values, we shouldn't be using them other that for WORKDIR directory
26 # naming anyway
27 d.setVar("baselib", "lib")
28 d.setVar("TARGET_ARCH", "allarch")
29 d.setVar("TARGET_OS", "linux")
30 d.setVar("TARGET_CC_ARCH", "none")
31 d.setVar("TARGET_LD_ARCH", "none")
32 d.setVar("TARGET_AS_ARCH", "none")
33 d.setVar("TARGET_FPU", "")
34 d.setVar("TARGET_PREFIX", "")
35 # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
36 # (this removes any dependencies from the hash perspective)
37 d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
38 d.setVar("SDK_ARCH", "none")
39 d.setVar("SDK_CC_ARCH", "none")
40 d.setVar("TARGET_CPPFLAGS", "none")
41 d.setVar("TARGET_CFLAGS", "none")
42 d.setVar("TARGET_CXXFLAGS", "none")
43 d.setVar("TARGET_LDFLAGS", "none")
44 d.setVar("POPULATESYSROOTDEPS", "")
45
46 # Avoid this being unnecessarily different due to nuances of
47 # the target machine that aren't important for "all" arch
48 # packages.
49 d.setVar("LDFLAGS", "")
50
51 # No need to do shared library processing or debug symbol handling
52 d.setVar("EXCLUDE_FROM_SHLIBS", "1")
53 d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
54 d.setVar("INHIBIT_PACKAGE_STRIP", "1")
55
56 # These multilib values shouldn't change allarch packages so exclude them
57 d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
58 d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
59 d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
60 elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
61 bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
62}
63
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 858507b343..2d0bbfbd42 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -1,11 +1,15 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
 #
 # This bbclass is used for creating archive for:
 #  1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
 #  2) patched source: ARCHIVER_MODE[src] = "patched" (default)
 #  3) configured source: ARCHIVER_MODE[src] = "configured"
-#  4) source mirror: ARCHIVE_MODE[src] = "mirror"
+#  4) source mirror: ARCHIVER_MODE[src] = "mirror"
 #  5) The patches between do_unpack and do_patch:
 #     ARCHIVER_MODE[diff] = "1"
 #     And you can set the one that you'd like to exclude from the diff:
@@ -51,55 +55,66 @@ ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
 ARCHIVER_MODE[dumpdata] ?= "0"
 ARCHIVER_MODE[recipe] ?= "0"
 ARCHIVER_MODE[mirror] ?= "split"
+ARCHIVER_MODE[compression] ?= "xz"
 
 DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
 ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
-ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_ARCH = "${TARGET_SYS}"
+ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
 ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
-ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
 ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
 
 # When producing a combined mirror directory, allow duplicates for the case
 # where multiple recipes use the same SRC_URI.
 ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}/mirror"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
 
 do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
 do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
 do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
-do_deploy_archives[dirs] = "${WORKDIR}"
 
 # This is a convenience for the shell script to use it
 
-
-python () {
-    pn = d.getVar('PN')
-    assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
-    if pn in assume_provided:
-        for p in d.getVar("PROVIDES").split():
-            if p != pn:
-                pn = p
-                break
+def include_package(d, pn):
 
     included, reason = copyleft_should_include(d)
     if not included:
         bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
-        return
+        return False
+
     else:
         bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
 
-
     # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
     # so avoid archiving source here.
     if pn.startswith('glibc-locale'):
-        return
+        return False
 
     # We just archive gcc-source for all the gcc related recipes
     if d.getVar('BPN') in ['gcc', 'libgcc'] \
        and not pn.startswith('gcc-source'):
         bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
+        return False
+
+    return True
+
+python () {
+    pn = d.getVar('PN')
+    assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+    if pn in assume_provided:
+        for p in d.getVar("PROVIDES").split():
+            if p != pn:
+                pn = p
+                break
+
+    if not include_package(d, pn):
         return
 
+    # TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
+    if pn.startswith('gcc-source'):
+        d.setVar('ARCHIVER_ARCH', "allarch")
+
     def hasTask(task):
         return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
 
@@ -118,7 +133,7 @@ python () {
         d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
     elif ar_src == "configured":
         # We can't use "addtask do_ar_configured after do_configure" since it
-        # will cause the deptask of do_populate_sysroot to run not matter what
+        # will cause the deptask of do_populate_sysroot to run no matter what
         # archives we need, so we add the depends here.
 
         # There is a corner case with "gcc-source-${PV}" recipes, they don't have
@@ -163,7 +178,7 @@ python () {
                 d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
 }
 
-# Take all the sources for a recipe and puts them in WORKDIR/archiver-work/.
+# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
 # Files in SRC_URI are copied directly, anything that's a directory
 # (e.g. git repositories) is "unpacked" and then put into a tarball.
 python do_ar_original() {
@@ -281,7 +296,10 @@ python do_ar_configured() {
         # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the
         # do_configure, we archive the already configured ${S} to
         # instead of.
-        elif pn != 'libtool-native':
+        # The kernel class functions require it to be on work-shared, we
+        # don't unpack, patch, configure again, just archive the already
+        # configured ${S}
+        elif not (pn == 'libtool-native' or is_work_shared(d)):
             def runTask(task):
                 prefuncs = d.getVarFlag(task, 'prefuncs') or ''
                 for func in prefuncs.split():
@@ -383,19 +401,11 @@ python do_ar_mirror() {
     subprocess.check_call(cmd, shell=True)
 }
 
-def exclude_useless_paths(tarinfo):
-    if tarinfo.isdir():
-        if tarinfo.name.endswith('/temp') or tarinfo.name.endswith('/patches') or tarinfo.name.endswith('/.pc'):
-            return None
-    elif tarinfo.name == 'temp' or tarinfo.name == 'patches' or tarinfo.name == '.pc':
-        return None
-    return tarinfo
-
 def create_tarball(d, srcdir, suffix, ar_outdir):
     """
     create the tarball from srcdir
     """
-    import tarfile
+    import subprocess
 
     # Make sure we are only creating a single tarball for gcc sources
     if (d.getVar('SRC_URI') == ""):
@@ -406,17 +416,30 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
     # that we archive the actual directory and not just the link.
     srcdir = os.path.realpath(srcdir)
 
+    compression_method = d.getVarFlag('ARCHIVER_MODE', 'compression')
+    if compression_method == "xz":
+        compression_cmd = "xz %s" % d.getVar('XZ_DEFAULTS')
+    # To keep compatibility with ARCHIVER_MODE[compression]
+    elif compression_method == "gz":
+        compression_cmd = "gzip"
+    elif compression_method == "bz2":
+        compression_cmd = "bzip2"
+    else:
+        bb.fatal("Unsupported compression_method: %s" % compression_method)
+
     bb.utils.mkdirhier(ar_outdir)
     if suffix:
-        filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
+        filename = '%s-%s.tar.%s' % (d.getVar('PF'), suffix, compression_method)
     else:
-        filename = '%s.tar.gz' % d.getVar('PF')
+        filename = '%s.tar.%s' % (d.getVar('PF'), compression_method)
     tarname = os.path.join(ar_outdir, filename)
 
     bb.note('Creating %s' % tarname)
-    tar = tarfile.open(tarname, 'w:gz')
-    tar.add(srcdir, arcname=os.path.basename(srcdir), filter=exclude_useless_paths)
-    tar.close()
+    dirname = os.path.dirname(srcdir)
+    basename = os.path.basename(srcdir)
+    exclude = "--exclude=temp --exclude=patches --exclude='.pc'"
+    tar_cmd = "tar %s -cf - %s | %s > %s" % (exclude, basename, compression_cmd, tarname)
+    subprocess.check_call(tar_cmd, cwd=dirname, shell=True)
 
 # creating .diff.gz between source.orig and source
 def create_diff_gz(d, src_orig, src, ar_outdir):
@@ -449,8 +472,8 @@ def create_diff_gz(d, src_orig, src, ar_outdir):
     os.chdir(cwd)
 
 def is_work_shared(d):
-    pn = d.getVar('PN')
-    return bb.data.inherits_class('kernel', d) or pn.startswith('gcc-source')
+    sharedworkdir = os.path.join(d.getVar('TMPDIR'), 'work-shared')
+    return d.getVar('S').startswith(sharedworkdir)
 
 # Run do_unpack and do_patch
 python do_unpack_and_patch() {
@@ -463,7 +486,7 @@ python do_unpack_and_patch() {
     ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
     pn = d.getVar('PN')
 
-    # The kernel class functions require it to be on work-shared, so we dont change WORKDIR
+    # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
    if not is_work_shared(d):
         # Change the WORKDIR to make do_unpack do_patch run in another dir.
         d.setVar('WORKDIR', ar_workdir)
@@ -483,6 +506,9 @@ python do_unpack_and_patch() {
         src_orig = '%s.orig' % src
         oe.path.copytree(src, src_orig)
 
+    if bb.data.inherits_class('dos2unix', d):
+        bb.build.exec_func('do_convert_crlf_to_lf', d)
+
     # Make sure gcc and kernel sources are patched only once
     if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
         bb.build.exec_func('do_patch', d)
@@ -505,7 +531,7 @@ python do_unpack_and_patch() {
 # of the output file ensures that we create it each time the recipe
 # gets rebuilt, at least as long as a PR server is used. We also rely
 # on that mechanism to catch changes in the file content, because the
-# file content is not part of of the task signature either.
+# file content is not part of the task signature either.
 do_ar_recipe[vardepsexclude] += "BBINCLUDED"
 python do_ar_recipe () {
     """
@@ -571,7 +597,7 @@ python do_dumpdata () {
 
 SSTATETASKS += "do_deploy_archives"
 do_deploy_archives () {
-    echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
+    bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
 }
 python do_deploy_archives_setscene () {
     sstate_setscene(d)
diff --git a/meta/classes/autotools-brokensep.bbclass b/meta/classes/autotools-brokensep.bbclass
deleted file mode 100644
index 71cf97a391..0000000000
--- a/meta/classes/autotools-brokensep.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
1# Autotools class for recipes where separate build dir doesn't work
2# Ideally we should fix software so it does work. Standard autotools supports
3# this.
4inherit autotools
5B = "${S}"
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
deleted file mode 100644
index 9dc8ebdaa7..0000000000
--- a/meta/classes/autotools.bbclass
+++ /dev/null
@@ -1,250 +0,0 @@
1def autotools_dep_prepend(d):
2 if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
3 return ''
4
5 pn = d.getVar('PN')
6 deps = ''
7
8 if pn in ['autoconf-native', 'automake-native']:
9 return deps
10 deps += 'autoconf-native automake-native '
11
12 if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
13 deps += 'libtool-native '
14 if not bb.data.inherits_class('native', d) \
15 and not bb.data.inherits_class('nativesdk', d) \
16 and not bb.data.inherits_class('cross', d) \
17 and not d.getVar('INHIBIT_DEFAULT_DEPS'):
18 deps += 'libtool-cross '
19
20 return deps
21
22DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
23
24inherit siteinfo
25
26# Space separated list of shell scripts with variables defined to supply test
27# results for autoconf tests we cannot run at build time.
28# The value of this variable is filled in in a prefunc because it depends on
29# the contents of the sysroot.
30export CONFIG_SITE
31
32acpaths ?= "default"
33EXTRA_AUTORECONF = "--exclude=autopoint --exclude=gtkdocize"
34
35export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
36
37# When building tools for use at build-time it's recommended for the build
38# system to use these variables when cross-compiling.
39# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
40export CPP_FOR_BUILD = "${BUILD_CPP}"
41export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
42
43export CC_FOR_BUILD = "${BUILD_CC}"
44export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
45
46export CXX_FOR_BUILD = "${BUILD_CXX}"
47export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
48
49export LD_FOR_BUILD = "${BUILD_LD}"
50export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
51
52def append_libtool_sysroot(d):
53 # Only supply libtool sysroot option for non-native packages
54 if not bb.data.inherits_class('native', d):
55 return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
56 return ""
57
58CONFIGUREOPTS = " --build=${BUILD_SYS} \
59 --host=${HOST_SYS} \
60 --target=${TARGET_SYS} \
61 --prefix=${prefix} \
62 --exec_prefix=${exec_prefix} \
63 --bindir=${bindir} \
64 --sbindir=${sbindir} \
65 --libexecdir=${libexecdir} \
66 --datadir=${datadir} \
67 --sysconfdir=${sysconfdir} \
68 --sharedstatedir=${sharedstatedir} \
69 --localstatedir=${localstatedir} \
70 --libdir=${libdir} \
71 --includedir=${includedir} \
72 --oldincludedir=${oldincludedir} \
73 --infodir=${infodir} \
74 --mandir=${mandir} \
75 --disable-silent-rules \
76 ${CONFIGUREOPT_DEPTRACK} \
77 ${@append_libtool_sysroot(d)}"
78CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
79
80CACHED_CONFIGUREVARS ?= ""
81
82AUTOTOOLS_SCRIPT_PATH ?= "${S}"
83CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
84
85AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
86
87oe_runconf () {
88 # Use relative path to avoid buildpaths in files
89 cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
90 cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
91 if [ -x "$cfgscript" ] ; then
92 bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
93 if ! CONFIG_SHELL=${CONFIG_SHELL-/bin/bash} ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
94 bbnote "The following config.log files may provide further information."
95 bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
96 bbfatal_log "configure failed"
97 fi
98 else
99 bbfatal "no configure script found at $cfgscript"
100 fi
101}
102
103CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
104
105autotools_preconfigure() {
106 if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
107 if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
108 if [ "${S}" != "${B}" ]; then
109 echo "Previously configured separate build directory detected, cleaning ${B}"
110 rm -rf ${B}
111 mkdir -p ${B}
112 else
113 # At least remove the .la files since automake won't automatically
114 # regenerate them even if CFLAGS/LDFLAGS are different
115 cd ${S}
116 if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
117 oe_runmake clean
118 fi
119 find ${S} -ignore_readdir_race -name \*.la -delete
120 fi
121 fi
122 fi
123}
124
125autotools_postconfigure(){
126 if [ -n "${CONFIGURESTAMPFILE}" ]; then
127 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
128 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
129 fi
130}
131
132EXTRACONFFUNCS ??= ""
133
134EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
135
136do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
137do_compile[prefuncs] += "autotools_aclocals"
138do_install[prefuncs] += "autotools_aclocals"
139do_configure[postfuncs] += "autotools_postconfigure"
140
141ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
142ACLOCALEXTRAPATH = ""
143ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
144ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
145
146python autotools_aclocals () {
147 d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
148}
149
150CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
151
152autotools_do_configure() {
153 # WARNING: gross hack follows:
154 # An autotools built package generally needs these scripts, however only
155 # automake or libtoolize actually install the current versions of them.
156 # This is a problem in builds that do not use libtool or automake, in the case
157 # where we -need- the latest version of these scripts. e.g. running a build
158 # for a package whose autotools are old, on an x86_64 machine, which the old
159 # config.sub does not support. Work around this by installing them manually
160 # regardless.
161
162 PRUNE_M4=""
163
164 for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
165 rm -f `dirname $ac`/configure
166 done
167 if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
168 olddir=`pwd`
169 cd ${AUTOTOOLS_SCRIPT_PATH}
170 mkdir -p ${ACLOCALDIR}
171 ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
172 if [ x"${acpaths}" = xdefault ]; then
173 acpaths=
174 for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
175 grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
176 acpaths="$acpaths -I $i"
177 done
178 else
179 acpaths="${acpaths}"
180 fi
181 acpaths="$acpaths ${ACLOCALEXTRAPATH}"
182 AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
183 automake --version
184 echo "AUTOV is $AUTOV"
185 if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
186 ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
187 fi
188 # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
189 # like it was auto-generated. Work around this by blowing it away
190 # by hand, unless the package specifically asked not to run aclocal.
191 if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
192 rm -f aclocal.m4
193 fi
194 if [ -e configure.in ]; then
195 CONFIGURE_AC=configure.in
196 else
197 CONFIGURE_AC=configure.ac
198 fi
199 if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
200 if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
201 : do nothing -- we still have an old unmodified configure.ac
202 else
203 bbnote Executing glib-gettextize --force --copy
204 echo "no" | glib-gettextize --force --copy
205 fi
206 elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
207 # We'd call gettextize here if it wasn't so broken...
208 cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
209 if [ -d ${S}/po/ ]; then
210 cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
211 if [ ! -e ${S}/po/remove-potcdate.sin ]; then
212 cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
213 fi
214 fi
215 PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
216 fi
217 mkdir -p m4
218
219 for i in $PRUNE_M4; do
220 find ${S} -ignore_readdir_race -name $i -delete
221 done
222
223 bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
224 ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
225 cd $olddir
226 fi
227 if [ -e ${CONFIGURE_SCRIPT} ]; then
228 oe_runconf
229 else
230 bbnote "nothing to configure"
231 fi
232}
233
234autotools_do_compile() {
235 oe_runmake
236}
237
238autotools_do_install() {
239 oe_runmake 'DESTDIR=${D}' install
240 # Info dir listing isn't interesting at this point so remove it if it exists.
241 if [ -e "${D}${infodir}/dir" ]; then
242 rm -f ${D}${infodir}/dir
243 fi
244}
245
246inherit siteconfig
247
248EXPORT_FUNCTIONS do_configure do_compile do_install
249
250B = "${WORKDIR}/build"
diff --git a/meta/classes/baremetal-image.bbclass b/meta/classes/baremetal-image.bbclass
deleted file mode 100644
index b0f5e885b5..0000000000
--- a/meta/classes/baremetal-image.bbclass
+++ /dev/null
@@ -1,98 +0,0 @@
1# Baremetal image class
2#
3# This class is meant to be inherited by recipes for baremetal/RTOS applications
4# It contains code that would be used by all of them, every recipe just needs to
5# override certain variables.
6#
7# For scalability purposes, code within this class focuses on the "image" wiring
8# to satisfy the OpenEmbedded image creation and testing infrastructure.
9#
10# See meta-skeleton for a working example.
11
12
13# Toolchain should be baremetal or newlib based.
14# TCLIBC="baremetal" or TCLIBC="newlib"
15COMPATIBLE_HOST_libc-musl_class-target = "null"
16COMPATIBLE_HOST_libc-glibc_class-target = "null"
17
18
19inherit rootfs-postcommands
20
21# Set some defaults, but these should be overriden by each recipe if required
22IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
23BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
24IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
25IMAGE_NAME_SUFFIX ?= ""
26
27do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
28
29do_image(){
30 install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin
31 install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
32}
33
34do_image_complete(){
35 :
36}
37
38python do_rootfs(){
39 from oe.utils import execute_pre_post_process
40 from pathlib import Path
41
42 # Write empty manifest file to satisfy test infrastructure
43 deploy_dir = d.getVar('IMGDEPLOYDIR')
44 link_name = d.getVar('IMAGE_LINK_NAME')
45 manifest_name = d.getVar('IMAGE_MANIFEST')
46
47 Path(manifest_name).touch()
48 if os.path.exists(manifest_name) and link_name:
49 manifest_link = deploy_dir + "/" + link_name + ".manifest"
50 if os.path.lexists(manifest_link):
51 os.remove(manifest_link)
52 os.symlink(os.path.basename(manifest_name), manifest_link)
53 execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
54}
55
56
57# Assure binaries, manifest and qemubootconf are populated on DEPLOY_DIR_IMAGE
58do_image_complete[dirs] = "${TOPDIR}"
59SSTATETASKS += "do_image_complete"
60SSTATE_SKIP_CREATION_task-image-complete = '1'
61do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
62do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
63do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
64addtask do_image_complete after do_image before do_build
65
66python do_image_complete_setscene () {
67 sstate_setscene(d)
68}
69addtask do_image_complete_setscene
70
71# QEMU generic Baremetal/RTOS parameters
72QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
73QB_MEM ?= "-m 256"
74QB_DEFAULT_FSTYPE ?= "bin"
75QB_DTB ?= ""
76QB_OPT_APPEND = "-nographic"
77
78# This next part is necessary to trick the build system into thinking
79# its building an image recipe so it generates the qemuboot.conf
80addtask do_rootfs before do_image after do_install
81addtask do_image after do_rootfs before do_image_complete
82addtask do_image_complete after do_image before do_build
83inherit qemuboot
84
85# Based on image.bbclass to make sure we build qemu
86python(){
87 # do_addto_recipe_sysroot doesnt exist for all recipes, but we need it to have
88 # /usr/bin on recipe-sysroot (qemu) populated
89 def extraimage_getdepends(task):
90 deps = ""
91 for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
92 # Make sure we only add it for qemu
93 if 'qemu' in dep:
94 deps += " %s:%s" % (dep, task)
95 return deps
96 d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_addto_recipe_sysroot'))
97 d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
98}
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
deleted file mode 100644
index b4160402f0..0000000000
--- a/meta/classes/base.bbclass
+++ /dev/null
@@ -1,735 +0,0 @@
1BB_DEFAULT_TASK ?= "build"
2CLASSOVERRIDE ?= "class-target"
3
4inherit patch
5inherit staging
6
7inherit mirrors
8inherit utils
9inherit utility-tasks
10inherit metadata_scm
11inherit logging
12
13OE_EXTRA_IMPORTS ?= ""
14
15OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license ${OE_EXTRA_IMPORTS}"
16OE_IMPORTS[type] = "list"
17
18PACKAGECONFIG_CONFARGS ??= ""
19
20def oe_import(d):
21 import sys
22
23 bbpath = d.getVar("BBPATH").split(":")
24 sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
25
26 def inject(name, value):
27 """Make a python object accessible from the metadata"""
28 if hasattr(bb.utils, "_context"):
29 bb.utils._context[name] = value
30 else:
31 __builtins__[name] = value
32
33 import oe.data
34 for toimport in oe.data.typed_value("OE_IMPORTS", d):
35 try:
36 imported = __import__(toimport)
37 inject(toimport.split(".", 1)[0], imported)
38 except AttributeError as e:
39 bb.error("Error importing OE modules: %s" % str(e))
40 return ""
41
42# We need the oe module name space early (before INHERITs get added)
43OE_IMPORTED := "${@oe_import(d)}"
44
45def lsb_distro_identifier(d):
46 adjust = d.getVar('LSB_DISTRO_ADJUST')
47 adjust_func = None
48 if adjust:
49 try:
50 adjust_func = globals()[adjust]
51 except KeyError:
52 pass
53 return oe.lsb.distro_identifier(adjust_func)
54
55die() {
56 bbfatal_log "$*"
57}
58
59oe_runmake_call() {
60 bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
61 ${MAKE} ${EXTRA_OEMAKE} "$@"
62}
63
64oe_runmake() {
65 oe_runmake_call "$@" || die "oe_runmake failed"
66}
67
68
69def base_dep_prepend(d):
70 if d.getVar('INHIBIT_DEFAULT_DEPS', False):
71 return ""
72 return "${BASE_DEFAULT_DEPS}"
73
74BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
75
76BASEDEPENDS = ""
77BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
78BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"
79
80DEPENDS_prepend="${BASEDEPENDS} "
81
82FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
83# THISDIR only works properly with imediate expansion as it has to run
84# in the context of the location its used (:=)
85THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
86
87def extra_path_elements(d):
88 path = ""
89 elements = (d.getVar('EXTRANATIVEPATH') or "").split()
90 for e in elements:
91 path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
92 return path
93
94PATH_prepend = "${@extra_path_elements(d)}"
95
96def get_lic_checksum_file_list(d):
97 filelist = []
98 lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
99 tmpdir = d.getVar("TMPDIR")
100 s = d.getVar("S")
101 b = d.getVar("B")
102 workdir = d.getVar("WORKDIR")
103
104 urls = lic_files.split()
105 for url in urls:
106 # We only care about items that are absolute paths since
107 # any others should be covered by SRC_URI.
108 try:
109 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
110 if method != "file" or not path:
111 raise bb.fetch.MalformedUrl(url)
112
113 if path[0] == '/':
114 if path.startswith((tmpdir, s, b, workdir)):
115 continue
116 filelist.append(path + ":" + str(os.path.exists(path)))
117 except bb.fetch.MalformedUrl:
118 bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
119 return " ".join(filelist)
120
121def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
122 tools = d.getVar(toolsvar).split()
123 origbbenv = d.getVar("BB_ORIGENV", False)
124 path = origbbenv.getVar("PATH")
125 bb.utils.mkdirhier(dest)
126 notfound = []
127 for tool in tools:
128 desttool = os.path.join(dest, tool)
129 if not os.path.exists(desttool):
130 # clean up dead symlink
131 if os.path.islink(desttool):
132 os.unlink(desttool)
133 srctool = bb.utils.which(path, tool, executable=True)
134 # gcc/g++ may link to ccache on some hosts, e.g.,
135 # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
136 # would return /usr/local/bin/ccache/gcc, but what we need is
137 # /usr/bin/gcc, this code can check and fix that.
138 if "ccache" in srctool:
139 srctool = bb.utils.which(path, tool, executable=True, direction=1)
140 if srctool:
141 os.symlink(srctool, desttool)
142 else:
143 notfound.append(tool)
144
145 if notfound and fatal:
146 bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
147
148addtask fetch
149do_fetch[dirs] = "${DL_DIR}"
150do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
151do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
152do_fetch[vardeps] += "SRCREV"
153python base_do_fetch() {
154
155 src_uri = (d.getVar('SRC_URI') or "").split()
156 if len(src_uri) == 0:
157 return
158
159 try:
160 fetcher = bb.fetch2.Fetch(src_uri, d)
161 fetcher.download()
162 except bb.fetch2.BBFetchException as e:
163 bb.fatal(str(e))
164}
165
166addtask unpack after do_fetch
167do_unpack[dirs] = "${WORKDIR}"
168
169do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
170
171python base_do_unpack() {
172 src_uri = (d.getVar('SRC_URI') or "").split()
173 if len(src_uri) == 0:
174 return
175
176 try:
177 fetcher = bb.fetch2.Fetch(src_uri, d)
178 fetcher.unpack(d.getVar('WORKDIR'))
179 except bb.fetch2.BBFetchException as e:
180 bb.fatal(str(e))
181}
182
183def get_layers_branch_rev(d):
184 layers = (d.getVar("BBLAYERS") or "").split()
185 layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
186 base_get_metadata_git_branch(i, None).strip(), \
187 base_get_metadata_git_revision(i, None)) \
188 for i in layers]
189 i = len(layers_branch_rev)-1
190 p1 = layers_branch_rev[i].find("=")
191 s1 = layers_branch_rev[i][p1:]
192 while i > 0:
193 p2 = layers_branch_rev[i-1].find("=")
194 s2= layers_branch_rev[i-1][p2:]
195 if s1 == s2:
196 layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
197 i -= 1
198 else:
199 i -= 1
200 p1 = layers_branch_rev[i].find("=")
201 s1= layers_branch_rev[i][p1:]
202 return layers_branch_rev
203
204
205BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
206BUILDCFG_FUNCS[type] = "list"
207
208def buildcfg_vars(d):
209 statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
210 for var in statusvars:
211 value = d.getVar(var)
212 if value is not None:
213 yield '%-20s = "%s"' % (var, value)
214
215def buildcfg_neededvars(d):
216 needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
217 pesteruser = []
218 for v in needed_vars:
219 val = d.getVar(v)
220 if not val or val == 'INVALID':
221 pesteruser.append(v)
222
223 if pesteruser:
224 bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
225
226addhandler base_eventhandler
227base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
228python base_eventhandler() {
229 import bb.runqueue
230
231 if isinstance(e, bb.event.ConfigParsed):
232 if not d.getVar("NATIVELSBSTRING", False):
233 d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
234 d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
235 d.setVar('BB_VERSION', bb.__version__)
236
237 # There might be no bb.event.ConfigParsed event if bitbake server is
238 # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
239 # exists.
240 if isinstance(e, bb.event.ConfigParsed) or \
241 (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
242 # Works with the line in layer.conf which changes PATH to point here
243 setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
244 setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
245
246 if isinstance(e, bb.event.MultiConfigParsed):
247 # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
248 # own contexts so the variables get expanded correctly for that arch, then inject back into
249 # the main data store.
250 deps = []
251 for config in e.mcdata:
252 deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
253 deps = " ".join(deps)
254 e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)
255
256 if isinstance(e, bb.event.BuildStarted):
257 localdata = bb.data.createCopy(d)
258 statuslines = []
259 for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
260 g = globals()
261 if func not in g:
262 bb.warn("Build configuration function '%s' does not exist" % func)
263 else:
264 flines = g[func](localdata)
265 if flines:
266 statuslines.extend(flines)
267
268 statusheader = d.getVar('BUILDCFG_HEADER')
269 if statusheader:
270 bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
271
272 # This code is to silence warnings where the SDK variables overwrite the
273 # target ones and we'd see dulpicate key names overwriting each other
274 # for various PREFERRED_PROVIDERS
275 if isinstance(e, bb.event.RecipePreFinalise):
276 if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
277 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
278 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
279 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
280 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
281
282 if isinstance(e, bb.event.RecipeParsed):
283 #
284 # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
285 # skip parsing for all the other providers which will mean they get uninstalled from the
286 # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
287 # particular.
288 #
289 pn = d.getVar('PN')
290 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
291 if not source_mirror_fetch:
292 provs = (d.getVar("PROVIDES") or "").split()
293 multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
294 for p in provs:
295 if p.startswith("virtual/") and p not in multiwhitelist:
296 profprov = d.getVar("PREFERRED_PROVIDER_" + p)
297 if profprov and pn != profprov:
298 raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
299}
300
301CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
302CLEANBROKEN = "0"
303
304addtask configure after do_patch
305do_configure[dirs] = "${B}"
306base_do_configure() {
307 if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
308 if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
309 cd ${B}
310 if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
311 oe_runmake clean
312 fi
313 # -ignore_readdir_race does not work correctly with -delete;
314 # use xargs to avoid spurious build failures
315 find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
316 fi
317 fi
318 if [ -n "${CONFIGURESTAMPFILE}" ]; then
319 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
320 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
321 fi
322}
323
324addtask compile after do_configure
325do_compile[dirs] = "${B}"
326base_do_compile() {
327 if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
328 oe_runmake || die "make failed"
329 else
330 bbnote "nothing to compile"
331 fi
332}
333
334addtask install after do_compile
335do_install[dirs] = "${B}"
336# Remove and re-create ${D} so that is it guaranteed to be empty
337do_install[cleandirs] = "${D}"
338
339base_do_install() {
340 :
341}
342
343base_do_package() {
344 :
345}
346
347addtask build after do_populate_sysroot
348do_build[noexec] = "1"
349do_build[recrdeptask] += "do_deploy"
350do_build () {
351 :
352}
353
354def set_packagetriplet(d):
355 archs = []
356 tos = []
357 tvs = []
358
359 archs.append(d.getVar("PACKAGE_ARCHS").split())
360 tos.append(d.getVar("TARGET_OS"))
361 tvs.append(d.getVar("TARGET_VENDOR"))
362
363 def settriplet(d, varname, archs, tos, tvs):
364 triplets = []
365 for i in range(len(archs)):
366 for arch in archs[i]:
367 triplets.append(arch + tvs[i] + "-" + tos[i])
368 triplets.reverse()
369 d.setVar(varname, " ".join(triplets))
370
371 settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
372
373 variants = d.getVar("MULTILIB_VARIANTS") or ""
374 for item in variants.split():
375 localdata = bb.data.createCopy(d)
376 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
377 localdata.setVar("OVERRIDES", overrides)
378
379 archs.append(localdata.getVar("PACKAGE_ARCHS").split())
380 tos.append(localdata.getVar("TARGET_OS"))
381 tvs.append(localdata.getVar("TARGET_VENDOR"))
382
383 settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
384
385python () {
386 import string, re
387
388 # Handle backfilling
389 oe.utils.features_backfill("DISTRO_FEATURES", d)
390 oe.utils.features_backfill("MACHINE_FEATURES", d)
391
392 if d.getVar("S")[-1] == '/':
393 bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
394 if d.getVar("B")[-1] == '/':
395 bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))
396
397 if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
398 d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
399 if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
400 d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")
401
402 # Handle PACKAGECONFIG
403 #
404 # These take the form:
405 #
406 # PACKAGECONFIG ??= "<default options>"
407 # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
408 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
409 if pkgconfigflags:
410 pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
411 pn = d.getVar("PN")
412
413 mlprefix = d.getVar("MLPREFIX")
414
415 def expandFilter(appends, extension, prefix):
416 appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
417 newappends = []
418 for a in appends:
419 if a.endswith("-native") or ("-cross-" in a):
420 newappends.append(a)
421 elif a.startswith("virtual/"):
422 subs = a.split("/", 1)[1]
423 if subs.startswith(prefix):
424 newappends.append(a + extension)
425 else:
426 newappends.append("virtual/" + prefix + subs + extension)
427 else:
428 if a.startswith(prefix):
429 newappends.append(a + extension)
430 else:
431 newappends.append(prefix + a + extension)
432 return newappends
433
434 def appendVar(varname, appends):
435 if not appends:
436 return
437 if varname.find("DEPENDS") != -1:
438 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
439 appends = expandFilter(appends, "", "nativesdk-")
440 elif bb.data.inherits_class('native', d):
441 appends = expandFilter(appends, "-native", "")
442 elif mlprefix:
443 appends = expandFilter(appends, "", mlprefix)
444 varname = d.expand(varname)
445 d.appendVar(varname, " " + " ".join(appends))
446
447 extradeps = []
448 extrardeps = []
449 extrarrecs = []
450 extraconf = []
451 for flag, flagval in sorted(pkgconfigflags.items()):
452 items = flagval.split(",")
453 num = len(items)
454 if num > 6:
455 bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
456 % (d.getVar('PN'), flag))
457
458 if flag in pkgconfig:
459 if num >= 3 and items[2]:
460 extradeps.append(items[2])
461 if num >= 4 and items[3]:
462 extrardeps.append(items[3])
463 if num >= 5 and items[4]:
464 extrarrecs.append(items[4])
465 if num >= 1 and items[0]:
466 extraconf.append(items[0])
467 elif num >= 2 and items[1]:
468 extraconf.append(items[1])
469
470 if num >= 6 and items[5]:
471 conflicts = set(items[5].split())
472 invalid = conflicts.difference(set(pkgconfigflags.keys()))
473 if invalid:
474 bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
475 % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))
476
477 if flag in pkgconfig:
478 intersec = conflicts.intersection(set(pkgconfig))
479 if intersec:
480 bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
481 % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
482
483 appendVar('DEPENDS', extradeps)
484 appendVar('RDEPENDS_${PN}', extrardeps)
485 appendVar('RRECOMMENDS_${PN}', extrarrecs)
486 appendVar('PACKAGECONFIG_CONFARGS', extraconf)
487
488 pn = d.getVar('PN')
489 license = d.getVar('LICENSE')
490 if license == "INVALID" and pn != "defaultpkgname":
491 bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
492
493 if bb.data.inherits_class('license', d):
494 check_license_format(d)
495 unmatched_license_flags = check_license_flags(d)
496 if unmatched_license_flags:
497 if len(unmatched_license_flags) == 1:
498 message = "because it has a restricted license '{0}'. Which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
499 else:
500 message = "because it has restricted licenses {0}. Which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
501 ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
502 bb.debug(1, "Skipping %s %s" % (pn, message))
503 raise bb.parse.SkipRecipe(message)
504
505 # If we're building a target package we need to use fakeroot (pseudo)
506 # in order to capture permissions, owners, groups and special files
507 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
508 d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
509 d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
510 d.setVarFlag('do_install', 'fakeroot', '1')
511 d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
512 d.setVarFlag('do_package', 'fakeroot', '1')
513 d.setVarFlag('do_package_setscene', 'fakeroot', '1')
514 d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
515 d.setVarFlag('do_devshell', 'fakeroot', '1')
516 d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
517
518 need_machine = d.getVar('COMPATIBLE_MACHINE')
519 if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
520 import re
521 compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
522 for m in compat_machines:
523 if re.match(need_machine, m):
524 break
525 else:
526 raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
527
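COMPATIBLE_MACHINE is treated as a Python regular expression and matched against every entry in MACHINEOVERRIDES, so a BSP recipe can restrict itself with something like the following (machine names are hypothetical):

    COMPATIBLE_MACHINE = "(qemux86|qemux86-64)"

If no override matches, the recipe is skipped at parse time rather than failing later in the build.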
528 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
529 if not source_mirror_fetch:
530 need_host = d.getVar('COMPATIBLE_HOST')
531 if need_host:
532 import re
533 this_host = d.getVar('HOST_SYS')
534 if not re.match(need_host, this_host):
535 raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
536
537 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
538
539 check_license = False if pn.startswith("nativesdk-") else True
540 for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
541 "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
542 "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
543 if pn.endswith(d.expand(t)):
544 check_license = False
545 if pn.startswith("gcc-source-"):
546 check_license = False
547
548 if check_license and bad_licenses:
549 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
550
551 whitelist = []
552 for lic in bad_licenses:
553 spdx_license = return_spdx(d, lic)
554 whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
555 if spdx_license:
556 whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
557
558 if pn in whitelist:
559 '''
560 We need to track what we are whitelisting and why. If pn is
561 incompatible we need to be able to note that the image that
562 is created may in fact contain incompatible licenses despite
563 INCOMPATIBLE_LICENSE being set.
564 '''
565 bb.note("Including %s as buildable despite it having an incompatible license because it has been whitelisted" % pn)
566 else:
567 pkgs = d.getVar('PACKAGES').split()
568 skipped_pkgs = {}
569 unskipped_pkgs = []
570 for pkg in pkgs:
571 incompatible_lic = incompatible_license(d, bad_licenses, pkg)
572 if incompatible_lic:
573 skipped_pkgs[pkg] = incompatible_lic
574 else:
575 unskipped_pkgs.append(pkg)
576 if unskipped_pkgs:
577 for pkg in skipped_pkgs:
578 bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
579 d.setVar('LICENSE_EXCLUSION-' + pkg, ' '.join(skipped_pkgs[pkg]))
580 for pkg in unskipped_pkgs:
581 bb.debug(1, "Including the package %s" % pkg)
582 else:
583 incompatible_lic = incompatible_license(d, bad_licenses)
584 for pkg in skipped_pkgs:
585 incompatible_lic += skipped_pkgs[pkg]
586 incompatible_lic = sorted(list(set(incompatible_lic)))
587
588 if incompatible_lic:
589 bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
590 raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
591
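As a sketch of how the variables above interact (the license and recipe names are chosen only for illustration), a configuration excluding GPLv3 code while whitelisting a single recipe could look like:

    INCOMPATIBLE_LICENSE = "GPL-3.0 LGPL-3.0"
    WHITELIST_GPL-3.0 = "example-recipe"

A whitelisted recipe is still built, with the note above recording that the resulting image may contain incompatible licenses; any other recipe or package matching the bad licenses is skipped or excluded at do_rootfs.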
592 needsrcrev = False
593 srcuri = d.getVar('SRC_URI')
594 for uri_string in srcuri.split():
595 uri = bb.fetch.URI(uri_string)
596
597 # HTTP/FTP use the wget fetcher
598 if uri.scheme in ("http", "https", "ftp"):
599 d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
600
601 # Svn packages should DEPEND on subversion-native
602 if uri.scheme == "svn":
603 needsrcrev = True
604 d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
605
606 # Git packages should DEPEND on git-native
607 elif uri.scheme in ("git", "gitsm"):
608 needsrcrev = True
609 d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
610
611 # Mercurial packages should DEPEND on mercurial-native
612 elif uri.scheme == "hg":
613 needsrcrev = True
614 d.appendVar("EXTRANATIVEPATH", ' python3-native ')
615 d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
616
617 # Perforce packages support SRCREV = "${AUTOREV}"
618 elif uri.scheme == "p4":
619 needsrcrev = True
620
621 # OSC packages should DEPEND on osc-native
622 elif uri.scheme == "osc":
623 d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
624
625 elif uri.scheme == "npm":
626 d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
627
628 # *.lz4 should DEPEND on lz4-native for unpacking
629 if uri.path.endswith('.lz4'):
630 d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
631
632 # *.lz should DEPEND on lzip-native for unpacking
633 elif uri.path.endswith('.lz'):
634 d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
635
636 # *.xz should DEPEND on xz-native for unpacking
637 elif uri.path.endswith('.xz') or uri.path.endswith('.txz'):
638 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
639
640 # .zip should DEPEND on unzip-native for unpacking
641 elif uri.path.endswith('.zip') or uri.path.endswith('.jar'):
642 d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
643
644 # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
645 elif uri.path.endswith('.rpm'):
646 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
647
648 # *.deb should DEPEND on xz-native for unpacking
649 elif uri.path.endswith('.deb'):
650 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
651
652 if needsrcrev:
653 d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
654
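The effect of the scheme checks above is that recipes never declare fetcher tools themselves; a hypothetical git-based recipe simply gets git-native added to its do_fetch dependencies and can use the automatically provided SRCPV in its version:

    SRC_URI = "git://example.com/project.git;protocol=https;branch=main"
    SRCREV = "${AUTOREV}"
    PV = "1.0+git${SRCPV}"

SRCPV itself is set by the needsrcrev branch above; the recipe only references it.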
655 # Gather all named SRCREVs to add to the sstate hash calculation
656 # This anonymous python snippet is called multiple times so we
657 # need to be careful to not double up the appends here and cause
658 # the base hash to mismatch the task hash
659 for uri in srcuri.split():
660 parm = bb.fetch.decodeurl(uri)[5]
661 uri_names = parm.get("name", "").split(",")
662 for uri_name in filter(None, uri_names):
663 srcrev_name = "SRCREV_{}".format(uri_name)
664 if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
665 d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
666
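This vardeps bookkeeping matters when a recipe fetches from several repositories using the name= parameter; a hypothetical example:

    SRC_URI = "git://example.com/app.git;name=app;branch=main \
               git://example.com/libextra.git;name=libextra;branch=main;destsuffix=libextra"
    SRCREV_app = "${AUTOREV}"
    SRCREV_libextra = "${AUTOREV}"

Each SRCREV_<name> is added to the do_fetch vardeps exactly once, so repeated parsing does not perturb the task hash.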
667 set_packagetriplet(d)
668
669 # 'multimachine' handling
670 mach_arch = d.getVar('MACHINE_ARCH')
671 pkg_arch = d.getVar('PACKAGE_ARCH')
672
673 if (pkg_arch == mach_arch):
674 # Already machine specific - nothing further to do
675 return
676
677 #
678 # We always try to scan SRC_URI for urls with machine overrides
679 # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
680 #
681 override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
682 if override != '0':
683 paths = []
684 fpaths = (d.getVar('FILESPATH') or '').split(':')
685 machine = d.getVar('MACHINE')
686 for p in fpaths:
687 if os.path.basename(p) == machine and os.path.isdir(p):
688 paths.append(p)
689
690 if len(paths) != 0:
691 for s in srcuri.split():
692 if not s.startswith("file://"):
693 continue
694 fetcher = bb.fetch2.Fetch([s], d)
695 local = fetcher.localpath(s)
696 for mp in paths:
697 if local.startswith(mp):
698 #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
699 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
700 return
701
702 packages = d.getVar('PACKAGES').split()
703 for pkg in packages:
704 pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)
705
706 # We could look for != PACKAGE_ARCH here but how to choose
707 # if multiple differences are present?
708 # Look through PACKAGE_ARCHS for the priority order?
709 if pkgarch and pkgarch == mach_arch:
710 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
711 bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
712}
713
714addtask cleansstate after do_clean
715python do_cleansstate() {
716 sstate_clean_cachefiles(d)
717}
718addtask cleanall after do_cleansstate
719do_cleansstate[nostamp] = "1"
720
721python do_cleanall() {
722 src_uri = (d.getVar('SRC_URI') or "").split()
723 if len(src_uri) == 0:
724 return
725
726 try:
727 fetcher = bb.fetch2.Fetch(src_uri, d)
728 fetcher.clean()
729 except bb.fetch2.BBFetchException as e:
730 bb.fatal(str(e))
731}
732do_cleanall[nostamp] = "1"
733
734
735EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
diff --git a/meta/classes/bash-completion.bbclass b/meta/classes/bash-completion.bbclass
deleted file mode 100644
index 80ee9b4874..0000000000
--- a/meta/classes/bash-completion.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
1DEPENDS_append_class-target = " bash-completion"
2
3PACKAGES += "${PN}-bash-completion"
4
5FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
6
7RDEPENDS_${PN}-bash-completion = "bash-completion"
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
deleted file mode 100644
index cbc9b1fa13..0000000000
--- a/meta/classes/bin_package.bbclass
+++ /dev/null
@@ -1,39 +0,0 @@
1#
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4#
5# Common variables and tasks for binary package recipes.
6# Basic principle:
7# * The files have been unpacked to ${S} by base.bbclass
8# * Skip do_configure and do_compile
9# * Use do_install to install the files to ${D}
10#
11# Note:
12# The "subdir" parameter in the SRC_URI is useful when the input package
13# is rpm, ipk, deb and so on, for example:
14#
15# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
16#
17# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
18# they would be in ${WORKDIR}.
19#
20
21# Skip the unwanted steps
22do_configure[noexec] = "1"
23do_compile[noexec] = "1"
24
25# Install the files to ${D}
26bin_package_do_install () {
27 # Do it carefully
28 [ -d "${S}" ] || exit 1
29 if [ -z "$(ls -A ${S})" ]; then
30 bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
31 fi
32 cd ${S}
33 tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
34 | tar --no-same-owner -xpf - -C ${D}
35}
36
37FILES_${PN} = "/"
38
39EXPORT_FUNCTIONS do_install
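A minimal consumer of this class (the recipe values below are placeholders, with checksums omitted) would look roughly like:

    SUMMARY = "Prebuilt example application"
    LICENSE = "CLOSED"
    SRC_URI = "http://example.com/example-app-${PV}-r1.ipk;subdir=${BP}"
    inherit bin_package

do_install then just copies the unpacked tree into ${D}, excluding any patch metadata.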
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
deleted file mode 100644
index 096b670e12..0000000000
--- a/meta/classes/binconfig-disabled.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1#
2# Class to disable binconfig files instead of installing them
3#
4
5# The list of scripts which should be disabled.
6BINCONFIG ?= ""
7
8FILES_${PN}-dev += "${bindir}/*-config"
9
10do_install_append () {
11 for x in ${BINCONFIG}; do
12 # Make the disabled script emit invalid parameters for those configure
13 # scripts which call it without checking the return code.
14 echo "#!/bin/sh" > ${D}$x
15 echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
16 echo "echo '--should-not-have-used-$x'" >> ${D}$x
17 echo "exit 1" >> ${D}$x
18 chmod +x ${D}$x
19 done
20}
21
22SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
23
24binconfig_disabled_sysroot_preprocess () {
25 for x in ${BINCONFIG}; do
26 configname=`basename $x`
27 install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
28 install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
29 done
30}
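Typical usage is to list the config scripts that should be neutralised, for example (the script name is illustrative):

    inherit binconfig-disabled
    BINCONFIG = "${bindir}/foo-config"

The installed stub then fails loudly and points callers at pkg-config instead of leaking staging paths into dependent builds.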
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
deleted file mode 100644
index 9112ed4608..0000000000
--- a/meta/classes/binconfig.bbclass
+++ /dev/null
@@ -1,54 +0,0 @@
1FILES_${PN}-dev += "${bindir}/*-config"
2
3# The namespaces can clash here hence the two step replace
4def get_binconfig_mangle(d):
5 s = "-e ''"
6 if not bb.data.inherits_class('native', d):
7 optional_quote = r"\(\"\?\)"
8 s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
9 s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
10 s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
11 s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
12 s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
13 s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
14 s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
15 s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
16 s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
17 s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
18 s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
19 s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
20 s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
21 s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
22 s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
23 s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
24 if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
25 s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")
26
27 return s
28
29BINCONFIG_GLOB ?= "*-config"
30
31PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
32
33binconfig_package_preprocess () {
34 for config in `find ${PKGD} -type f -name '${BINCONFIG_GLOB}'`; do
35 sed -i \
36 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
37 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
38 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
39 -e 's:${STAGING_DATADIR}:${datadir}:' \
40 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
41 $config
42 done
43}
44
45SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
46
47binconfig_sysroot_preprocess () {
48 for config in `find ${S} -type f -name '${BINCONFIG_GLOB}'` `find ${B} -type f -name '${BINCONFIG_GLOB}'`; do
49 configname=`basename $config`
50 install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
51 sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
52 chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
53 done
54}
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
deleted file mode 100644
index dc794228ff..0000000000
--- a/meta/classes/blacklist.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
1# anonymous support class originally from angstrom
2#
3# To use the blacklist, a distribution should include this
4# class in the INHERIT_DISTRO
5#
6# No longer use ANGSTROM_BLACKLIST, instead use a table of
7# recipes in PNBLACKLIST
8#
9# Features:
10#
11# * To add a package to the blacklist, set:
12# PNBLACKLIST[pn] = "message"
13#
14
15python () {
16 blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
17
18 if blacklist:
19 raise bb.parse.SkipRecipe("Recipe is blacklisted: %s" % (blacklist))
20}
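With this class inherited via INHERIT_DISTRO, blacklisting a recipe is a one-line configuration entry (recipe name and message are examples only):

    PNBLACKLIST[example-recipe] = "Does not build with the default toolchain"

Any attempt to build that recipe is then skipped at parse time with the given message.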
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 117a44eaf3..fd53e92402 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -6,8 +6,10 @@
6# Copyright (C) 2011-2016 Intel Corporation 6# Copyright (C) 2011-2016 Intel Corporation
7# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org> 7# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
8# 8#
9# SPDX-License-Identifier: MIT
10#
9 11
10inherit image-artifact-names 12IMAGE_CLASSES += "image-artifact-names"
11 13
12BUILDHISTORY_FEATURES ?= "image package sdk" 14BUILDHISTORY_FEATURES ?= "image package sdk"
13BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory" 15BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
@@ -31,7 +33,7 @@ BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}
31# of failed builds. 33# of failed builds.
32# 34#
33# The expected usage is via auto.conf, but passing via the command line also works 35# The expected usage is via auto.conf, but passing via the command line also works
34# with: BB_ENV_EXTRAWHITE=BUILDHISTORY_RESET BUILDHISTORY_RESET=1 36# with: BB_ENV_PASSTHROUGH_ADDITIONS=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
35BUILDHISTORY_RESET ?= "" 37BUILDHISTORY_RESET ?= ""
36 38
37BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}" 39BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}"
@@ -43,15 +45,16 @@ BUILDHISTORY_COMMIT ?= "1"
43BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>" 45BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
44BUILDHISTORY_PUSH_REPO ?= "" 46BUILDHISTORY_PUSH_REPO ?= ""
45BUILDHISTORY_TAG ?= "build" 47BUILDHISTORY_TAG ?= "build"
48BUILDHISTORY_PATH_PREFIX_STRIP ?= ""
46 49
47SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory" 50SSTATEPOSTINSTFUNCS:append = " buildhistory_emit_pkghistory"
48# We want to avoid influencing the signatures of sstate tasks - first the function itself: 51# We want to avoid influencing the signatures of sstate tasks - first the function itself:
49sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory" 52sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
50# then the value added to SSTATEPOSTINSTFUNCS: 53# then the value added to SSTATEPOSTINSTFUNCS:
51SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory" 54SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
52 55
53# Similarly for our function that gets the output signatures 56# Similarly for our function that gets the output signatures
54SSTATEPOSTUNPACKFUNCS_append = " buildhistory_emit_outputsigs" 57SSTATEPOSTUNPACKFUNCS:append = " buildhistory_emit_outputsigs"
55sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs" 58sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
56SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs" 59SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
57 60
@@ -90,13 +93,19 @@ buildhistory_emit_sysroot() {
90python buildhistory_emit_pkghistory() { 93python buildhistory_emit_pkghistory() {
91 if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']: 94 if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']:
92 bb.build.exec_func("buildhistory_emit_sysroot", d) 95 bb.build.exec_func("buildhistory_emit_sysroot", d)
93
94 if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
95 return 0 96 return 0
96 97
97 if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split(): 98 if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
98 return 0 99 return 0
99 100
101 if d.getVar('BB_CURRENTTASK') in ['package', 'package_setscene']:
102 # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
103 bb.build.exec_func("buildhistory_list_pkg_files", d)
104 return 0
105
106 if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
107 return 0
108
100 import re 109 import re
101 import json 110 import json
102 import shlex 111 import shlex
@@ -286,7 +295,7 @@ python buildhistory_emit_pkghistory() {
286 r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr)) 295 r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
287 if r < 0: 296 if r < 0:
288 msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr) 297 msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
289 package_qa_handle_error("version-going-backwards", msg, d) 298 oe.qa.handle_error("version-going-backwards", msg, d)
290 299
291 pkginfo = PackageInfo(pkg) 300 pkginfo = PackageInfo(pkg)
292 # Apparently the version can be different on a per-package basis (see Python) 301 # Apparently the version can be different on a per-package basis (see Python)
@@ -318,8 +327,7 @@ python buildhistory_emit_pkghistory() {
318 327
319 write_pkghistory(pkginfo, d) 328 write_pkghistory(pkginfo, d)
320 329
321 # Create files-in-<package-name>.txt files containing a list of files of each recipe's package 330 oe.qa.exit_if_errors(d)
322 bb.build.exec_func("buildhistory_list_pkg_files", d)
323} 331}
324 332
325python buildhistory_emit_outputsigs() { 333python buildhistory_emit_outputsigs() {
@@ -441,11 +449,16 @@ def buildhistory_list_installed(d, rootfs_type="image"):
441 else: 449 else:
442 pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target") 450 pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
443 451
452 if rootfs_type == "sdk_host":
453 pkgdata_dir = d.getVar('PKGDATA_DIR_SDK')
454 else:
455 pkgdata_dir = d.getVar('PKGDATA_DIR')
456
444 for output_type, output_file in process_list: 457 for output_type, output_file in process_list:
445 output_file_full = os.path.join(d.getVar('WORKDIR'), output_file) 458 output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)
446 459
447 with open(output_file_full, 'w') as output: 460 with open(output_file_full, 'w') as output:
448 output.write(format_pkg_list(pkgs, output_type)) 461 output.write(format_pkg_list(pkgs, output_type, pkgdata_dir))
449 462
450python buildhistory_list_installed_image() { 463python buildhistory_list_installed_image() {
451 buildhistory_list_installed(d) 464 buildhistory_list_installed(d)
@@ -486,6 +499,8 @@ buildhistory_get_installed() {
486 -e 's:|: -> :' \ 499 -e 's:|: -> :' \
487 -e 's:"\[REC\]":[style=dotted]:' \ 500 -e 's:"\[REC\]":[style=dotted]:' \
488 -e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \ 501 -e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
502 -e 's:"\([*]\+\)" "\([^"]*\)":[label="\2"]:' \
503 -e 's:"\[RPROVIDES\]":[style=dashed]:' \
489 $1/depends.tmp 504 $1/depends.tmp
490 # Add header, sorted and de-duped contents and footer and then delete the temp file 505 # Add header, sorted and de-duped contents and footer and then delete the temp file
491 printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot 506 printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
@@ -493,11 +508,22 @@ buildhistory_get_installed() {
493 echo "}" >> $1/depends.dot 508 echo "}" >> $1/depends.dot
494 rm $1/depends.tmp 509 rm $1/depends.tmp
495 510
511 # Set correct pkgdatadir
512 pkgdatadir=${PKGDATA_DIR}
513 if [ "$2" = "sdk" ] && [ "$3" = "host" ] ; then
514 pkgdatadir="${PKGDATA_DIR_SDK}"
515 fi
516
496 # Produce installed package sizes list 517 # Produce installed package sizes list
497 oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp 518 oe-pkgdata-util -p $pkgdatadir read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
498 cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt 519 cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
499 rm $1/installed-package-sizes.tmp 520 rm $1/installed-package-sizes.tmp
500 521
522 # Produce package info: runtime_name, buildtime_name, recipe, version, size
523 oe-pkgdata-util -p $pkgdatadir read-value "PACKAGE,PN,PV,PKGSIZE" -n -f $pkgcache > $1/installed-package-info.tmp
524 cat $1/installed-package-info.tmp | sort -n -r -k 5 > $1/installed-package-info.txt
525 rm $1/installed-package-info.tmp
526
501 # We're now done with the cache, delete it 527 # We're now done with the cache, delete it
502 rm $pkgcache 528 rm $pkgcache
503 529
@@ -534,7 +560,7 @@ buildhistory_get_sdk_installed() {
534 return 560 return
535 fi 561 fi
536 562
537 buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk 563 buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk $1
538} 564}
539 565
540buildhistory_get_sdk_installed_host() { 566buildhistory_get_sdk_installed_host() {
@@ -665,26 +691,29 @@ python buildhistory_get_extra_sdkinfo() {
665 691
666# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of 692# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
667# unneeded packages but before the removal of packaging files 693# unneeded packages but before the removal of packaging files
668ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_list_installed_image ;" 694ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_list_installed_image"
669ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_get_image_installed ;" 695ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_get_image_installed"
670ROOTFS_POSTUNINSTALL_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_image ;| buildhistory_get_image_installed ;" 696ROOTFS_POSTUNINSTALL_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_image| buildhistory_get_image_installed"
671ROOTFS_POSTUNINSTALL_COMMAND[vardepsexclude] += "buildhistory_list_installed_image buildhistory_get_image_installed" 697ROOTFS_POSTUNINSTALL_COMMAND[vardepsexclude] += "buildhistory_list_installed_image buildhistory_get_image_installed"
672 698
673IMAGE_POSTPROCESS_COMMAND += "buildhistory_get_imageinfo ;" 699IMAGE_POSTPROCESS_COMMAND += "buildhistory_get_imageinfo"
674IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo ;" 700IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo"
675IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo" 701IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
676 702
677# We want these to be the last run so that we get called after complementary package installation 703# We want these to be the last run so that we get called after complementary package installation
678POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;" 704POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_list_installed_sdk_target"
679POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;" 705POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_get_sdk_installed_target"
680POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;" 706POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target| buildhistory_get_sdk_installed_target"
707POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"
681 708
682POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;" 709POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_list_installed_sdk_host"
683POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;" 710POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_get_sdk_installed_host"
684POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;" 711POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host| buildhistory_get_sdk_installed_host"
712POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"
685 713
686SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; " 714SDK_POSTPROCESS_COMMAND:append = " buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
687SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; " 715SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
716SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
688 717
689python buildhistory_write_sigs() { 718python buildhistory_write_sigs() {
690 if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split(): 719 if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
@@ -694,7 +723,7 @@ python buildhistory_write_sigs() {
694 if hasattr(bb.parse.siggen, 'dump_siglist'): 723 if hasattr(bb.parse.siggen, 'dump_siglist'):
695 taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task') 724 taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task')
696 bb.utils.mkdirhier(taskoutdir) 725 bb.utils.mkdirhier(taskoutdir)
697 bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt')) 726 bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'), d.getVar("BUILDHISTORY_PATH_PREFIX_STRIP"))
698} 727}
699 728
700def buildhistory_get_build_id(d): 729def buildhistory_get_build_id(d):
@@ -714,30 +743,10 @@ def buildhistory_get_build_id(d):
714 statusheader = d.getVar('BUILDCFG_HEADER') 743 statusheader = d.getVar('BUILDCFG_HEADER')
715 return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines))) 744 return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
716 745
717def buildhistory_get_modified(path):
718 # copied from get_layer_git_status() in image-buildinfo.bbclass
719 import subprocess
720 try:
721 subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
722 git diff --quiet --no-ext-diff
723 git diff --quiet --no-ext-diff --cached""" % path,
724 shell=True,
725 stderr=subprocess.STDOUT)
726 return ""
727 except subprocess.CalledProcessError as ex:
728 # Silently treat errors as "modified", without checking for the
729 # (expected) return code 1 in a modified git repo. For example, we get
730 # output and a 129 return code when a layer isn't a git repo at all.
731 return " -- modified"
732
733def buildhistory_get_metadata_revs(d): 746def buildhistory_get_metadata_revs(d):
734 # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want 747 # We want an easily machine-readable format here
735 layers = (d.getVar("BBLAYERS") or "").split() 748 revisions = oe.buildcfg.get_layer_revisions(d)
736 medadata_revs = ["%-17s = %s:%s%s" % (os.path.basename(i), \ 749 medadata_revs = ["%-17s = %s:%s%s" % (r[1], r[2], r[3], r[4]) for r in revisions]
737 base_get_metadata_git_branch(i, None).strip(), \
738 base_get_metadata_git_revision(i, None), \
739 buildhistory_get_modified(i)) \
740 for i in layers]
741 return '\n'.join(medadata_revs) 750 return '\n'.join(medadata_revs)
742 751
743def outputvars(vars, listvars, d): 752def outputvars(vars, listvars, d):
@@ -762,11 +771,11 @@ def buildhistory_get_imagevars(d):
762def buildhistory_get_sdkvars(d): 771def buildhistory_get_sdkvars(d):
763 if d.getVar('BB_WORKERCONTEXT') != '1': 772 if d.getVar('BB_WORKERCONTEXT') != '1':
764 return "" 773 return ""
765 sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE" 774 sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES TOOLCHAIN_HOST_TASK TOOLCHAIN_TARGET_TASK BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
766 if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext': 775 if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
767 # Extensible SDK uses some additional variables 776 # Extensible SDK uses some additional variables
768 sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN" 777 sdkvars += " ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
769 listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST" 778 listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE"
770 return outputvars(sdkvars, listvars, d) 779 return outputvars(sdkvars, listvars, d)
771 780
772 781
@@ -869,10 +878,11 @@ python buildhistory_eventhandler() {
869 if os.path.isdir(olddir): 878 if os.path.isdir(olddir):
870 shutil.rmtree(olddir) 879 shutil.rmtree(olddir)
871 rootdir = e.data.getVar("BUILDHISTORY_DIR") 880 rootdir = e.data.getVar("BUILDHISTORY_DIR")
881 bb.utils.mkdirhier(rootdir)
872 entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ] 882 entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
873 bb.utils.mkdirhier(olddir) 883 bb.utils.mkdirhier(olddir)
874 for entry in entries: 884 for entry in entries:
875 os.rename(os.path.join(rootdir, entry), 885 bb.utils.rename(os.path.join(rootdir, entry),
876 os.path.join(olddir, entry)) 886 os.path.join(olddir, entry))
877 elif isinstance(e, bb.event.BuildCompleted): 887 elif isinstance(e, bb.event.BuildCompleted):
878 if reset: 888 if reset:
@@ -911,22 +921,12 @@ def _get_srcrev_values(d):
911 if urldata[u].method.supports_srcrev(): 921 if urldata[u].method.supports_srcrev():
912 scms.append(u) 922 scms.append(u)
913 923
914 autoinc_templ = 'AUTOINC+'
915 dict_srcrevs = {} 924 dict_srcrevs = {}
916 dict_tag_srcrevs = {} 925 dict_tag_srcrevs = {}
917 for scm in scms: 926 for scm in scms:
918 ud = urldata[scm] 927 ud = urldata[scm]
919 for name in ud.names: 928 for name in ud.names:
920 try: 929 autoinc, rev = ud.method.sortable_revision(ud, d, name)
921 rev = ud.method.sortable_revision(ud, d, name)
922 except TypeError:
923 # support old bitbake versions
924 rev = ud.method.sortable_revision(scm, ud, d, name)
925 # Clean this up when we next bump bitbake version
926 if type(rev) != str:
927 autoinc, rev = rev
928 elif rev.startswith(autoinc_templ):
929 rev = rev[len(autoinc_templ):]
930 dict_srcrevs[name] = rev 930 dict_srcrevs[name] = rev
931 if 'tag' in ud.parm: 931 if 'tag' in ud.parm:
932 tag = ud.parm['tag']; 932 tag = ud.parm['tag'];
@@ -957,23 +957,19 @@ def write_latest_srcrev(d, pkghistdir):
957 value = value.replace('"', '').strip() 957 value = value.replace('"', '').strip()
958 old_tag_srcrevs[key] = value 958 old_tag_srcrevs[key] = value
959 with open(srcrevfile, 'w') as f: 959 with open(srcrevfile, 'w') as f:
960 orig_srcrev = d.getVar('SRCREV', False) or 'INVALID' 960 for name, srcrev in sorted(srcrevs.items()):
961 if orig_srcrev != 'INVALID': 961 suffix = "_" + name
962 f.write('# SRCREV = "%s"\n' % orig_srcrev) 962 if name == "default":
963 if len(srcrevs) > 1: 963 suffix = ""
964 for name, srcrev in sorted(srcrevs.items()): 964 orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
965 orig_srcrev = d.getVar('SRCREV_%s' % name, False) 965 if orig_srcrev:
966 if orig_srcrev: 966 f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
967 f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev)) 967 f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
968 f.write('SRCREV_%s = "%s"\n' % (name, srcrev)) 968 for name, srcrev in sorted(tag_srcrevs.items()):
969 else: 969 f.write('# tag_%s = "%s"\n' % (name, srcrev))
970 f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values()))) 970 if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
971 if len(tag_srcrevs) > 0: 971 pkg = d.getVar('PN')
972 for name, srcrev in sorted(tag_srcrevs.items()): 972 bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
973 f.write('# tag_%s = "%s"\n' % (name, srcrev))
974 if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
975 pkg = d.getVar('PN')
976 bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
977 973
978 else: 974 else:
979 if os.path.exists(srcrevfile): 975 if os.path.exists(srcrevfile):
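As a reminder of how this class is normally activated (the values below are the common local.conf pattern, not something introduced by this change):

    INHERIT += "buildhistory"
    BUILDHISTORY_COMMIT = "1"

With BUILDHISTORY_COMMIT enabled, each build commits the collected package, image and SDK metadata into the git repository under BUILDHISTORY_DIR, so buildhistory-diff can compare successive builds.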
diff --git a/meta/classes/buildstats-summary.bbclass b/meta/classes/buildstats-summary.bbclass
index f9b241b6c5..12e8f17836 100644
--- a/meta/classes/buildstats-summary.bbclass
+++ b/meta/classes/buildstats-summary.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Summarize sstate usage at the end of the build 7# Summarize sstate usage at the end of the build
2python buildstats_summary () { 8python buildstats_summary () {
3 import collections 9 import collections
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
deleted file mode 100644
index a8ee6e69a6..0000000000
--- a/meta/classes/buildstats.bbclass
+++ /dev/null
@@ -1,251 +0,0 @@
1BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
2
3################################################################################
4# Build statistics gathering.
5#
6# The CPU and Time gathering/tracking functions and bbevent inspiration
7# were written by Christopher Larson.
8#
9################################################################################
10
11def get_buildprocess_cputime(pid):
12 with open("/proc/%d/stat" % pid, "r") as f:
13 fields = f.readline().rstrip().split()
14 # 13: utime, 14: stime, 15: cutime, 16: cstime
15 return sum(int(field) for field in fields[13:16])
16
17def get_process_cputime(pid):
18 import resource
19 with open("/proc/%d/stat" % pid, "r") as f:
20 fields = f.readline().rstrip().split()
21 stats = {
22 'utime' : fields[13],
23 'stime' : fields[14],
24 'cutime' : fields[15],
25 'cstime' : fields[16],
26 }
27 iostats = {}
28 if os.path.isfile("/proc/%d/io" % pid):
29 with open("/proc/%d/io" % pid, "r") as f:
30 while True:
31 i = f.readline().strip()
32 if not i:
33 break
34 if not ":" in i:
35 # one extra line is appended (empty or containing "0"),
36 # most probably due to a race condition in the kernel while
37 # updating the IO stats
38 break
39 i = i.split(": ")
40 iostats[i[0]] = i[1]
41 resources = resource.getrusage(resource.RUSAGE_SELF)
42 childres = resource.getrusage(resource.RUSAGE_CHILDREN)
43 return stats, iostats, resources, childres
44
45def get_cputime():
46 with open("/proc/stat", "r") as f:
47 fields = f.readline().rstrip().split()[1:]
48 return sum(int(field) for field in fields)
49
50def set_timedata(var, d, server_time):
51 d.setVar(var, server_time)
52
53def get_timedata(var, d, end_time):
54 oldtime = d.getVar(var, False)
55 if oldtime is None:
56 return
57 return end_time - oldtime
58
59def set_buildtimedata(var, d):
60 import time
61 time = time.time()
62 cputime = get_cputime()
63 proctime = get_buildprocess_cputime(os.getpid())
64 d.setVar(var, (time, cputime, proctime))
65
66def get_buildtimedata(var, d):
67 import time
68 timedata = d.getVar(var, False)
69 if timedata is None:
70 return
71 oldtime, oldcpu, oldproc = timedata
72 procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
73 cpudiff = get_cputime() - oldcpu
74 end_time = time.time()
75 timediff = end_time - oldtime
76 if cpudiff > 0:
77 cpuperc = float(procdiff) * 100 / cpudiff
78 else:
79 cpuperc = None
80 return timediff, cpuperc
81
82def write_task_data(status, logfile, e, d):
83 with open(os.path.join(logfile), "a") as f:
84 elapsedtime = get_timedata("__timedata_task", d, e.time)
85 if elapsedtime:
86 f.write(d.expand("${PF}: %s\n" % e.task))
87 f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
88 cpu, iostats, resources, childres = get_process_cputime(os.getpid())
89 if cpu:
90 f.write("utime: %s\n" % cpu['utime'])
91 f.write("stime: %s\n" % cpu['stime'])
92 f.write("cutime: %s\n" % cpu['cutime'])
93 f.write("cstime: %s\n" % cpu['cstime'])
94 for i in iostats:
95 f.write("IO %s: %s\n" % (i, iostats[i]))
96 rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
97 for i in rusages:
98 f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
99 for i in rusages:
100 f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
101 if status == "passed":
102 f.write("Status: PASSED \n")
103 else:
104 f.write("Status: FAILED \n")
105 f.write("Ended: %0.2f \n" % e.time)
106
107def write_host_data(logfile, e, d):
108 import subprocess, os, datetime
109 cmds = d.getVar('BB_LOG_HOST_STAT_CMDS')
110 if cmds is None:
111 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
112 d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
113 bb.warn("buildstats: Collecting host data failed. Set BB_LOG_HOST_STAT_CMDS=\"command1 ; command2 ; ... \" in conf\/local.conf\n")
114 return
115 path = d.getVar("PATH")
116 opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
117 ospath = os.environ['PATH']
118 os.environ['PATH'] = path + ":" + opath + ":" + ospath
119 with open(logfile, "a") as f:
120 f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
121 for cmd in cmds.split(";"):
122 if len(cmd) == 0:
123 continue
124 try:
125 output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT, timeout=1).decode('utf-8')
126 except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
127 output = "Error running command: %s\n%s\n" % (cmd, err)
128 f.write("%s\n%s\n" % (cmd, output))
129 os.environ['PATH'] = ospath
130
131python run_buildstats () {
132 import bb.build
133 import bb.event
134 import time, subprocess, platform
135
136 bn = d.getVar('BUILDNAME')
137 ########################################################################
138 # bitbake fires HeartbeatEvent even before a build has been
139 # triggered, causing BUILDNAME to be None
140 ########################################################################
141 if bn is not None:
142 bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
143 taskdir = os.path.join(bsdir, d.getVar('PF'))
144 if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
145 bb.utils.mkdirhier(bsdir)
146 write_host_data(os.path.join(bsdir, "host_stats"), e, d)
147
148 if isinstance(e, bb.event.BuildStarted):
149 ########################################################################
150 # If the kernel was not configured to provide I/O statistics, issue
151 # a one time warning.
152 ########################################################################
153 if not os.path.isfile("/proc/%d/io" % os.getpid()):
154 bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")
155
156 ########################################################################
157 # at first pass make the buildstats hierarchy and then
158 # set the buildname
159 ########################################################################
160 bb.utils.mkdirhier(bsdir)
161 set_buildtimedata("__timedata_build", d)
162 build_time = os.path.join(bsdir, "build_stats")
163 # write start of build into build_time
164 with open(build_time, "a") as f:
165 host_info = platform.uname()
166 f.write("Host Info: ")
167 for x in host_info:
168 if x:
169 f.write(x + " ")
170 f.write("\n")
171 f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])
172
173 elif isinstance(e, bb.event.BuildCompleted):
174 build_time = os.path.join(bsdir, "build_stats")
175 with open(build_time, "a") as f:
176 ########################################################################
177 # Write build statistics for the build
178 ########################################################################
179 timedata = get_buildtimedata("__timedata_build", d)
180 if timedata:
181 time, cpu = timedata
182 # write end of build and cpu used into build_time
183 f.write("Elapsed time: %0.2f seconds \n" % (time))
184 if cpu:
185 f.write("CPU usage: %0.1f%% \n" % cpu)
186
187 if isinstance(e, bb.build.TaskStarted):
188 set_timedata("__timedata_task", d, e.time)
189 bb.utils.mkdirhier(taskdir)
190 # write into the task event file the name and start time
191 with open(os.path.join(taskdir, e.task), "a") as f:
192 f.write("Event: %s \n" % bb.event.getName(e))
193 f.write("Started: %0.2f \n" % e.time)
194
195 elif isinstance(e, bb.build.TaskSucceeded):
196 write_task_data("passed", os.path.join(taskdir, e.task), e, d)
197 if e.task == "do_rootfs":
198 bs = os.path.join(bsdir, "build_stats")
199 with open(bs, "a") as f:
200 rootfs = d.getVar('IMAGE_ROOTFS')
201 if os.path.isdir(rootfs):
202 try:
203 rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
204 stderr=subprocess.STDOUT).decode('utf-8')
205 f.write("Uncompressed Rootfs size: %s" % rootfs_size)
206 except subprocess.CalledProcessError as err:
207 bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
208
209 elif isinstance(e, bb.build.TaskFailed):
210 # Can have a failure before TaskStarted so need to mkdir here too
211 bb.utils.mkdirhier(taskdir)
212 write_task_data("failed", os.path.join(taskdir, e.task), e, d)
213 ########################################################################
214 # Let's make things easier and tell people where the build failed in
215 # build_status. We do this here because BuildCompleted triggers no
216 # matter what the status of the build actually is
217 ########################################################################
218 build_status = os.path.join(bsdir, "build_stats")
219 with open(build_status, "a") as f:
220 f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
221 if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
222 write_host_data(build_status, e, d)
223}
224
225addhandler run_buildstats
226run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
227
228python runqueue_stats () {
229 import buildstats
230 from bb import event, runqueue
231 # We should not record any samples before the first task has started,
232 # because that's the first activity shown in the process chart.
233 # Besides, at that point we are sure that the build variables
234 # are available that we need to find the output directory.
235 # The persistent SystemStats is stored in the datastore and
236 # closed when the build is done.
237 system_stats = d.getVar('_buildstats_system_stats', False)
238 if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
239 system_stats = buildstats.SystemStats(d)
240 d.setVar('_buildstats_system_stats', system_stats)
241 if system_stats:
242 # Ensure that we sample at important events.
243 done = isinstance(e, bb.event.BuildCompleted)
244 system_stats.sample(e, force=done)
245 if done:
246 system_stats.close()
247 d.delVar('_buildstats_system_stats')
248}
249
250addhandler runqueue_stats
251runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
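The host-data hooks above are driven purely by configuration; one possible local.conf setup (the command list is only a suggestion) would be:

    INHERIT += "buildstats"
    BB_LOG_HOST_STAT_ON_INTERVAL = "1"
    BB_LOG_HOST_STAT_ON_FAILURE = "1"
    BB_LOG_HOST_STAT_CMDS = "uptime ; free -m ; df -h"

Each command is run with a one-second timeout and its output is appended to the host_stats file in the buildstats directory.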
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
index 4532894c57..262db6672c 100644
--- a/meta/classes/ccache.bbclass
+++ b/meta/classes/ccache.bbclass
@@ -1,4 +1,10 @@
1# 1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
2# Usage: 8# Usage:
3# - Enable ccache 9# - Enable ccache
4# Add the following line to a conffile such as conf/local.conf: 10# Add the following line to a conffile such as conf/local.conf:
@@ -22,6 +28,11 @@
22# be shared between different builds. 28# be shared between different builds.
23CCACHE_TOP_DIR ?= "${TMPDIR}/ccache" 29CCACHE_TOP_DIR ?= "${TMPDIR}/ccache"
24 30
31# ccache-native and cmake-native have a circular dependency
32# that affects other native recipes, but not all of them.
33# This allows ccache to be used in the specified native recipes.
34CCACHE_NATIVE_RECIPES_ALLOWED ?= ""
35
25# ccahe removes CCACHE_BASEDIR from file path, so that hashes will be the same 36# ccahe removes CCACHE_BASEDIR from file path, so that hashes will be the same
26# in different builds. 37# in different builds.
27export CCACHE_BASEDIR ?= "${TMPDIR}" 38export CCACHE_BASEDIR ?= "${TMPDIR}"
@@ -48,9 +59,9 @@ python() {
48 Enable ccache for the recipe 59 Enable ccache for the recipe
49 """ 60 """
50 pn = d.getVar('PN') 61 pn = d.getVar('PN')
51 # quilt-native doesn't need ccache since no c files 62 if (pn in d.getVar('CCACHE_NATIVE_RECIPES_ALLOWED') or
52 if not (bb.data.inherits_class("native", d) or 63 not (bb.data.inherits_class("native", d) or
53 bb.utils.to_boolean(d.getVar('CCACHE_DISABLE'))): 64 bb.utils.to_boolean(d.getVar('CCACHE_DISABLE')))):
54 d.appendVar('DEPENDS', ' ccache-native') 65 d.appendVar('DEPENDS', ' ccache-native')
55 d.setVar('CCACHE', 'ccache ') 66 d.setVar('CCACHE', 'ccache ')
56} 67}
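Putting the new variable together with the existing usage notes, a local.conf sketch (the recipe name is an arbitrary example) might read:

    INHERIT += "ccache"
    CCACHE_NATIVE_RECIPES_ALLOWED = "curl-native"

Native recipes stay ccache-free by default because of the ccache-native/cmake-native circular dependency; only recipes listed in CCACHE_NATIVE_RECIPES_ALLOWED opt back in.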
diff --git a/meta/classes/ccmake.bbclass b/meta/classes/ccmake.bbclass
index df5134a108..c5b4bf6260 100644
--- a/meta/classes/ccmake.bbclass
+++ b/meta/classes/ccmake.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1inherit terminal 7inherit terminal
2 8
3python do_ccmake() { 9python do_ccmake() {
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
index 26b984c4db..16729dcf61 100644
--- a/meta/classes/chrpath.bbclass
+++ b/meta/classes/chrpath.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1CHRPATH_BIN ?= "chrpath" 7CHRPATH_BIN ?= "chrpath"
2PREPROCESS_RELOCATE_DIRS ?= "" 8PREPROCESS_RELOCATE_DIRS ?= ""
3 9
@@ -56,7 +62,7 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlin
56def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False): 62def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
57 import subprocess as sub 63 import subprocess as sub
58 64
59 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE) 65 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE, text=True)
60 out, err = p.communicate() 66 out, err = p.communicate()
61 # If returned successfully, process stdout for results 67 # If returned successfully, process stdout for results
62 if p.returncode != 0: 68 if p.returncode != 0:
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
deleted file mode 100644
index 24b53a13e4..0000000000
--- a/meta/classes/clutter.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
1def get_minor_dir(v):
2 import re
3 m = re.match(r"^([0-9]+)\.([0-9]+)", v)
4 return "%s.%s" % (m.group(1), m.group(2))
5
6def get_real_name(n):
7 import re
8 m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
9 return "%s" % (m.group(1))
10
11VERMINOR = "${@get_minor_dir("${PV}")}"
12REALNAME = "${@get_real_name("${BPN}")}"
13
14SRC_URI = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
15S = "${WORKDIR}/${REALNAME}-${PV}"
16
17CLUTTERBASEBUILDCLASS ??= "autotools"
18inherit ${CLUTTERBASEBUILDCLASS} pkgconfig gtk-doc gettext
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
deleted file mode 100644
index 4af22268b7..0000000000
--- a/meta/classes/cmake.bbclass
+++ /dev/null
@@ -1,217 +0,0 @@
1# Path to the CMake source tree (the directory containing the top-level CMakeLists.txt) to process.
2OECMAKE_SOURCEPATH ??= "${S}"
3
4DEPENDS_prepend = "cmake-native "
5B = "${WORKDIR}/build"
6
7# What CMake generator to use.
8# The supported options are "Unix Makefiles" or "Ninja".
9OECMAKE_GENERATOR ?= "Ninja"
10
11python() {
12 generator = d.getVar("OECMAKE_GENERATOR")
13 if "Unix Makefiles" in generator:
14 args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
15 d.setVar("OECMAKE_GENERATOR_ARGS", args)
16 d.setVarFlag("do_compile", "progress", "percent")
17 elif "Ninja" in generator:
18 args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
19 d.appendVar("DEPENDS", " ninja-native")
20 d.setVar("OECMAKE_GENERATOR_ARGS", args)
21 d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
22 else:
23 bb.fatal("Unknown CMake Generator %s" % generator)
24}
25OECMAKE_AR ?= "${AR}"
26
27# Compiler flags
28OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
29OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
30OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
31OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
32OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
33OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
34CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
35CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
36
37def oecmake_map_compiler(compiler, d):
38 args = d.getVar(compiler).split()
39 if args[0] == "ccache":
40 return args[1], args[0]
41 return args[0], ""
42
43# C/C++ Compiler (without cpu arch/tune arguments)
44OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}"
45OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
46OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
47OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
48
49# clear compiler vars for allarch to avoid sig hash difference
50OECMAKE_C_COMPILER_allarch = ""
51OECMAKE_C_COMPILER_LAUNCHER_allarch = ""
52OECMAKE_CXX_COMPILER_allarch = ""
53OECMAKE_CXX_COMPILER_LAUNCHER_allarch = ""
54
55OECMAKE_RPATH ?= ""
56OECMAKE_PERLNATIVE_DIR ??= ""
57OECMAKE_EXTRA_ROOT_PATH ?= ""
58
59OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
60OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
61
62EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
63
64export CMAKE_BUILD_PARALLEL_LEVEL
65CMAKE_BUILD_PARALLEL_LEVEL_task-compile = "${@oe.utils.parallel_make(d, False)}"
66CMAKE_BUILD_PARALLEL_LEVEL_task-install = "${@oe.utils.parallel_make(d, True)}"
67
68OECMAKE_TARGET_COMPILE ?= "all"
69OECMAKE_TARGET_INSTALL ?= "install"
70
71def map_host_os_to_system_name(host_os):
72 if host_os.startswith('mingw'):
73 return 'Windows'
74 if host_os.startswith('linux'):
75 return 'Linux'
76 return host_os
77
78# CMake expects target architectures in the format of uname(2),
79# which do not always match TARGET_ARCH, so all the necessary
80# conversions should happen here.
81def map_host_arch_to_uname_arch(host_arch):
82 if host_arch == "powerpc":
83 return "ppc"
84 if host_arch == "powerpc64":
85 return "ppc64"
86 return host_arch
87
88cmake_do_generate_toolchain_file() {
89 if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
90 cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
91 fi
92 cat > ${WORKDIR}/toolchain.cmake <<EOF
93# CMake system name must be something like "Linux".
94# This is important for cross-compiling.
95$cmake_crosscompiling
96set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
97set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
98set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
99set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
100set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
101set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
102set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
103set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
104set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
105set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
106set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
107set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
108set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
109set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
110set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
111set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
112
113# only search in the paths provided so cmake doesn't pick
114# up libraries and tools from the native build machine
115set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
116set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
117set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
118set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
119set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
120set( CMAKE_PROGRAM_PATH "/" )
121
122# Use qt.conf settings
123set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
124
125# We need to set the rpath to the correct directory as cmake does not provide any
126# directory as rpath by default
127set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
128
129# Use RPATHs relative to build directory for reproducibility
130set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
131
132# Use our cmake modules
133list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
134
135# add for non /usr/lib libdir, e.g. /usr/lib64
136set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
137
138# add include dir to implicit includes in case it differs from /usr/include
139list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
140list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
141
142EOF
143}
144
145addtask generate_toolchain_file after do_patch before do_configure
146
147CONFIGURE_FILES = "CMakeLists.txt"
148
149cmake_do_configure() {
150 if [ "${OECMAKE_BUILDPATH}" ]; then
151 bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
152 fi
153
154 if [ "${S}" != "${B}" ]; then
155 rm -rf ${B}
156 mkdir -p ${B}
157 cd ${B}
158 else
159 find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
160 fi
161
162	# Just like autotools, cmake can use a site file to cache results that need generated binaries to run
163 if [ -e ${WORKDIR}/site-file.cmake ] ; then
164 oecmake_sitefile="-C ${WORKDIR}/site-file.cmake"
165 else
166 oecmake_sitefile=
167 fi
168
169 cmake \
170 ${OECMAKE_GENERATOR_ARGS} \
171 $oecmake_sitefile \
172 ${OECMAKE_SOURCEPATH} \
173 -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
174 -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
175 -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
176 -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
177 -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
178	  -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \
179 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
180 -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
181 -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
182 -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
183 -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
184 -DPython_EXECUTABLE:PATH=${PYTHON} \
185 -DPython3_EXECUTABLE:PATH=${PYTHON} \
186 -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
187 -DCMAKE_INSTALL_SO_NO_EXE=0 \
188 -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
189 -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
190 -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
191 ${EXTRA_OECMAKE} \
192 -Wno-dev
193}
194
195# To disable verbose cmake logs for a given recipe, or globally in configuration metadata such as local.conf,
196# add the following:
197#
198# CMAKE_VERBOSE = ""
199#
200
201CMAKE_VERBOSE ??= "VERBOSE=1"
202
203# Then run do_compile again
204cmake_runcmake_build() {
205 bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
206 eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
207}
208
209cmake_do_compile() {
210 cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
211}
212
213cmake_do_install() {
214 DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
215}
216
217EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
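
For illustration, a minimal recipe built on this class might look like the sketch below; the recipe name and SRC_URI are hypothetical, and EXTRA_OECMAKE simply feeds extra -D options into the cmake invocation in cmake_do_configure above.

SUMMARY = "Example CMake-based application"
LICENSE = "MIT"
SRC_URI = "https://example.org/hello-cmake-1.0.tar.gz"

inherit cmake

# Disable the project's test suite; passed through ${EXTRA_OECMAKE}
EXTRA_OECMAKE += "-DBUILD_TESTING=OFF"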
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
deleted file mode 100644
index d319d66ab2..0000000000
--- a/meta/classes/cml1.bbclass
+++ /dev/null
@@ -1,101 +0,0 @@
1# returns all the elements from the src uri that are .cfg files
2def find_cfgs(d):
3 sources=src_patches(d, True)
4 sources_list=[]
5 for s in sources:
6 if s.endswith('.cfg'):
7 sources_list.append(s)
8
9 return sources_list
10
11cml1_do_configure() {
12 set -e
13 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
14 yes '' | oe_runmake oldconfig
15}
16
17EXPORT_FUNCTIONS do_configure
18addtask configure after do_unpack do_patch before do_compile
19
20inherit terminal
21
22OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
23HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
24HOSTLDFLAGS = "${BUILD_LDFLAGS}"
25CROSS_CURSES_LIB = "-lncurses -ltinfo"
26CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
27TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
28
29KCONFIG_CONFIG_COMMAND ??= "menuconfig"
30KCONFIG_CONFIG_ROOTDIR ??= "${B}"
31python do_menuconfig() {
32 import shutil
33
34 config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
35 configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
36
37 try:
38 mtime = os.path.getmtime(config)
39 shutil.copy(config, configorig)
40 except OSError:
41 mtime = 0
42
43    # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
44 d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
45 d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
46 d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
47 d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
48    # ensure that environment variables are overwritten with this task's 'd' values
49 d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
50
51 oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
52 d.getVar('PN') + ' Configuration', d)
53
54 # FIXME this check can be removed when the minimum bitbake version has been bumped
55 if hasattr(bb.build, 'write_taint'):
56 try:
57 newmtime = os.path.getmtime(config)
58 except OSError:
59 newmtime = 0
60
61 if newmtime > mtime:
62 bb.note("Configuration changed, recompile will be forced")
63 bb.build.write_taint('do_compile', d)
64}
65do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
66do_menuconfig[nostamp] = "1"
67do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
68addtask menuconfig after do_configure
69
70python do_diffconfig() {
71 import shutil
72 import subprocess
73
74 workdir = d.getVar('WORKDIR')
75 fragment = workdir + '/fragment.cfg'
76 configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
77 config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
78
79 try:
80 md5newconfig = bb.utils.md5_file(configorig)
81 md5config = bb.utils.md5_file(config)
82 isdiff = md5newconfig != md5config
83 except IOError as e:
84 bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e)
85
86 if isdiff:
87 statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
88 subprocess.call(statement, shell=True)
89            # No need to check the exit code: we know it's going to be
90            # non-zero, and that's what we expect.
91 shutil.copy(configorig, config)
92
93 bb.plain("Config fragment has been dumped into:\n %s" % fragment)
94 else:
95 if os.path.exists(fragment):
96 os.unlink(fragment)
97}
98
99do_diffconfig[nostamp] = "1"
100do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
101addtask diffconfig
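
For illustration, the configuration workflow above can be driven from configuration metadata; a minimal sketch, where the nconfig value is only an example of an alternative Kconfig target:

# e.g. in local.conf or a bbappend: use nconfig instead of the default menuconfig
KCONFIG_CONFIG_COMMAND = "nconfig"
# Typical flow: bitbake <recipe> -c menuconfig to edit .config in a terminal,
# then bitbake <recipe> -c diffconfig to dump the changes into fragment.cfg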
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
deleted file mode 100644
index d6d11fad26..0000000000
--- a/meta/classes/compress_doc.bbclass
+++ /dev/null
@@ -1,263 +0,0 @@
1# Compress man pages in ${mandir} and info pages in ${infodir}
2#
3# 1. The doc will be compressed to gz format by default.
4#
5# 2. It will automatically convert docs that are compressed in a format
6#    listed in ${DOC_COMPRESS_LIST} but different from ${DOC_COMPRESS}
7#    to the ${DOC_COMPRESS} format
8#
9# 3. It is easy to add a new compression type by editing
10#    local.conf, such as (see also the concrete sketch after this header):
11# DOC_COMPRESS_LIST_append = ' abc'
12# DOC_COMPRESS = 'abc'
13# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
14# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
15
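As a concrete version of the abc example in point 3, zstd support could be wired up from local.conf roughly as follows; this is a sketch and the zstd command lines are assumptions rather than tested defaults:

DOC_COMPRESS_LIST_append = " zst"
DOC_COMPRESS = "zst"
DOC_COMPRESS_CMD[zst] = "zstd -f --rm"
DOC_DECOMPRESS_CMD[zst] = "zstd -d -f --rm"
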
16# All supported compression policy
17DOC_COMPRESS_LIST ?= "gz xz bz2"
18
19# Compression policy, must be one of ${DOC_COMPRESS_LIST}
20DOC_COMPRESS ?= "gz"
21
22# Compression shell command
23DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
24DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
25DOC_COMPRESS_CMD[xz] ?= "xz -v"
26
27# Decompression shell command
28DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
29DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
30DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
31
32PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
33python package_do_compress_doc() {
34 compress_mode = d.getVar('DOC_COMPRESS')
35 compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split()
36 if compress_mode not in compress_list:
37 bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
38
39 dvar = d.getVar('PKGD')
40 compress_cmds = {}
41 decompress_cmds = {}
42 for mode in compress_list:
43 compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
44 decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
45
46 mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir"))
47 if os.path.exists(mandir):
48        # Decompress doc files whose format is not compress_mode
49 decompress_doc(mandir, compress_mode, decompress_cmds)
50 compress_doc(mandir, compress_mode, compress_cmds)
51
52 infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir"))
53 if os.path.exists(infodir):
54        # Decompress doc files whose format is not compress_mode
55 decompress_doc(infodir, compress_mode, decompress_cmds)
56 compress_doc(infodir, compress_mode, compress_cmds)
57}
58
59def _get_compress_format(file, compress_format_list):
60 for compress_format in compress_format_list:
61 compress_suffix = '.' + compress_format
62 if file.endswith(compress_suffix):
63 return compress_format
64
65 return ''
66
67# Collect hardlinks to dict, each element in dict lists hardlinks
68# which point to the same doc file.
69# {hardlink10: [hardlink11, hardlink12],,,}
70# The hardlink10, hardlink11 and hardlink12 are the same file.
71def _collect_hardlink(hardlink_dict, file):
72 for hardlink in hardlink_dict:
73        # Add to the existing hardlink
74 if os.path.samefile(hardlink, file):
75 hardlink_dict[hardlink].append(file)
76 return hardlink_dict
77
78 hardlink_dict[file] = []
79 return hardlink_dict
80
81def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
82 import subprocess
83 for target in hardlink_dict:
84 if decompress:
85 compress_format = _get_compress_format(target, shell_cmds.keys())
86 cmd = "%s -f %s" % (shell_cmds[compress_format], target)
87 bb.note('decompress hardlink %s' % target)
88 else:
89 cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
90 bb.note('compress hardlink %s' % target)
91 (retval, output) = subprocess.getstatusoutput(cmd)
92 if retval:
93 bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
94 return
95
96 for hardlink_dup in hardlink_dict[target]:
97 if decompress:
98 # Remove compress suffix
99 compress_suffix = '.' + compress_format
100 new_hardlink = hardlink_dup[:-len(compress_suffix)]
101 new_target = target[:-len(compress_suffix)]
102 else:
103 # Append compress suffix
104 compress_suffix = '.' + compress_mode
105 new_hardlink = hardlink_dup + compress_suffix
106 new_target = target + compress_suffix
107
108 bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
109 if not os.path.exists(new_hardlink):
110 os.link(new_target, new_hardlink)
111 if os.path.exists(hardlink_dup):
112 os.unlink(hardlink_dup)
113
114def _process_symlink(file, compress_format, decompress=False):
115 compress_suffix = '.' + compress_format
116 if decompress:
117 # Remove compress suffix
118 new_linkname = file[:-len(compress_suffix)]
119 new_source = os.readlink(file)[:-len(compress_suffix)]
120 else:
121 # Append compress suffix
122 new_linkname = file + compress_suffix
123 new_source = os.readlink(file) + compress_suffix
124
125 bb.note('symlink %s-->%s' % (new_linkname, new_source))
126 if not os.path.exists(new_linkname):
127 os.symlink(new_source, new_linkname)
128
129 os.unlink(file)
130
131def _is_info(file):
132 flags = '.info .info-'.split()
133 for flag in flags:
134 if flag in os.path.basename(file):
135 return True
136
137 return False
138
139def _is_man(file):
140 import re
141
142    # It refers to the MANSECT variable in man(1.6g)'s man.config
143    # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
144    # Does not start with '.', and contains one of the above colon-separated elements
145 p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
146 if p.search(file):
147 return True
148
149 return False
150
151def _is_compress_doc(file, compress_format_list):
152 compress_format = _get_compress_format(file, compress_format_list)
153 compress_suffix = '.' + compress_format
154 if file.endswith(compress_suffix):
155 # Remove the compress suffix
156 uncompress_file = file[:-len(compress_suffix)]
157 if _is_info(uncompress_file) or _is_man(uncompress_file):
158 return True, compress_format
159
160 return False, ''
161
162def compress_doc(topdir, compress_mode, compress_cmds):
163 import subprocess
164 hardlink_dict = {}
165 for root, dirs, files in os.walk(topdir):
166 for f in files:
167 file = os.path.join(root, f)
168 if os.path.isdir(file):
169 continue
170
171 if _is_info(file) or _is_man(file):
172 # Symlink
173 if os.path.islink(file):
174 _process_symlink(file, compress_mode)
175 # Hardlink
176 elif os.lstat(file).st_nlink > 1:
177 _collect_hardlink(hardlink_dict, file)
178 # Normal file
179 elif os.path.isfile(file):
180 cmd = "%s %s" % (compress_cmds[compress_mode], file)
181 (retval, output) = subprocess.getstatusoutput(cmd)
182 if retval:
183 bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
184 continue
185 bb.note('compress file %s' % file)
186
187 _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
188
189# Decompress doc files whose format is not compress_mode
190def decompress_doc(topdir, compress_mode, decompress_cmds):
191 import subprocess
192 hardlink_dict = {}
193 decompress = True
194 for root, dirs, files in os.walk(topdir):
195 for f in files:
196 file = os.path.join(root, f)
197 if os.path.isdir(file):
198 continue
199
200 res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
201            # Decompress files whose format is not compress_mode
202 if res and compress_mode!=compress_format:
203 # Symlink
204 if os.path.islink(file):
205 _process_symlink(file, compress_format, decompress)
206 # Hardlink
207 elif os.lstat(file).st_nlink > 1:
208 _collect_hardlink(hardlink_dict, file)
209 # Normal file
210 elif os.path.isfile(file):
211 cmd = "%s %s" % (decompress_cmds[compress_format], file)
212 (retval, output) = subprocess.getstatusoutput(cmd)
213 if retval:
214 bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
215 continue
216 bb.note('decompress file %s' % file)
217
218 _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
219
220python compress_doc_updatealternatives () {
221 if not bb.data.inherits_class('update-alternatives', d):
222 return
223
224 mandir = d.getVar("mandir")
225 infodir = d.getVar("infodir")
226 compress_mode = d.getVar('DOC_COMPRESS')
227 for pkg in (d.getVar('PACKAGES') or "").split():
228 old_names = (d.getVar('ALTERNATIVE_%s' % pkg) or "").split()
229 new_names = []
230 for old_name in old_names:
231 old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
232 old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \
233 d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \
234 d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
235 d.getVar('ALTERNATIVE_TARGET') or \
236 old_link
237 # Sometimes old_target is specified as relative to the link name.
238 old_target = os.path.join(os.path.dirname(old_link), old_target)
239
240            # The update-alternatives entries used for compressed docs
241 if mandir in old_target or infodir in old_target:
242 new_name = old_name + '.' + compress_mode
243 new_link = old_link + '.' + compress_mode
244 new_target = old_target + '.' + compress_mode
245 d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
246 d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
247 if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name):
248 d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
249 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
250 elif d.getVarFlag('ALTERNATIVE_TARGET', old_name):
251 d.delVarFlag('ALTERNATIVE_TARGET', old_name)
252 d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
253 elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg):
254 d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
255 elif d.getVar('ALTERNATIVE_TARGET'):
256 d.setVar('ALTERNATIVE_TARGET', new_target)
257
258 new_names.append(new_name)
259
260 if new_names:
261 d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
262}
263
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
index eabf12ce7a..9ff9956fe9 100644
--- a/meta/classes/copyleft_compliance.bbclass
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Deploy sources for recipes for compliance with copyleft-style licenses
2# Defaults to using symlinks, as it's a quick operation, and one can easily
3# follow the links when making use of the files (e.g. tar with the -h arg).
diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass
index c36bce431a..83cd90060d 100644
--- a/meta/classes/copyleft_filter.bbclass
+++ b/meta/classes/copyleft_filter.bbclass
@@ -1,10 +1,14 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Filter the license, the copyleft_should_include returns True for the
2# COPYLEFT_LICENSE_INCLUDE recipe, and False for the
3# COPYLEFT_LICENSE_EXCLUDE.
4#
5# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
6#
7# vi:sts=4:sw=4:et
8
9COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL* AGPL*'
10COPYLEFT_LICENSE_INCLUDE[type] = 'list'
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
deleted file mode 100644
index 88ca272145..0000000000
--- a/meta/classes/core-image.bbclass
+++ /dev/null
@@ -1,75 +0,0 @@
1# Common code for generating core reference images
2#
3# Copyright (C) 2007-2011 Linux Foundation
4
5# IMAGE_FEATURES control content of the core reference images
6#
7# By default we install packagegroup-core-boot and packagegroup-base-extended packages;
8# this gives us a working (console-only) rootfs.
9#
10# Available IMAGE_FEATURES:
11#
12# - x11 - X server
13# - x11-base - X server with minimal environment
14# - x11-sato - OpenedHand Sato environment
15# - tools-debug - debugging tools
16# - eclipse-debug - Eclipse remote debugging support
17# - tools-profile - profiling tools
18# - tools-testapps - tools usable to make some device tests
19# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
20# - nfs-server - NFS server
21# - nfs-client - NFS client
22# - ssh-server-dropbear - SSH server (dropbear)
23# - ssh-server-openssh - SSH server (openssh)
24# - hwcodecs - Install hardware acceleration codecs
25# - package-management - installs package management tools and preserves the package manager database
26# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
27# - empty-root-password
28# - allow-empty-password
29# - allow-root-login
30# - post-install-logging
31# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
32# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
33# - doc-pkgs - documentation packages for all installed packages in the rootfs
34# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
35# - ptest-pkgs - ptest packages for all ptest-enabled recipes
36# - read-only-rootfs - tweaks an image to support read-only rootfs
37# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
38# - splash - bootup splash screen
39#
40FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
41FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
42FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
43FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
44FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
45FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
46FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
47FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
48FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
49FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client"
50FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
51FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
52FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
53
54
55# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
56# Including image feature foo would replace the image features bar1 and bar2
57IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
58
59# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
60# An error would be raised if both image features foo and bar1 (or bar2) are included
61
62MACHINE_HWCODECS ??= ""
63
64CORE_IMAGE_BASE_INSTALL = '\
65 packagegroup-core-boot \
66 packagegroup-base-extended \
67 \
68 ${CORE_IMAGE_EXTRA_INSTALL} \
69 '
70
71CORE_IMAGE_EXTRA_INSTALL ?= ""
72
73IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
74
75inherit image
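
For illustration, a minimal image recipe built on this class might look like the sketch below; the feature and package choices are examples only, not recommendations:

SUMMARY = "Example console image with SSH and package management"
LICENSE = "MIT"

inherit core-image

IMAGE_FEATURES += "ssh-server-dropbear package-management"
CORE_IMAGE_EXTRA_INSTALL += "htop"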
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
deleted file mode 100644
index 867edf8707..0000000000
--- a/meta/classes/cpan-base.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
1#
2# cpan-base provides various Perl-related information needed for building
3# cpan modules
4#
5FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
6
7DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
8RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
9
10inherit perl-version
11
12def is_target(d):
13 if not bb.data.inherits_class('native', d):
14 return "yes"
15 return "no"
16
17PERLLIBDIRS = "${libdir}/perl5"
18PERLLIBDIRS_class-native = "${libdir}/perl5"
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
deleted file mode 100644
index e9908ae4b8..0000000000
--- a/meta/classes/cpan.bbclass
+++ /dev/null
@@ -1,65 +0,0 @@
1#
2# This is for perl modules that use the old Makefile.PL build system
3#
4inherit cpan-base perlnative
5
6EXTRA_CPANFLAGS ?= ""
7EXTRA_PERLFLAGS ?= ""
8
9# Env var which tells perl if it should use host (no) or target (yes) settings
10export PERLCONFIGTARGET = "${@is_target(d)}"
11
12# Env var which tells perl where the perl include files are
13export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE"
14export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}"
15export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
16export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
17export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
18
19cpan_do_configure () {
20 yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS}
21
22 # Makefile.PLs can exit with success without generating a
23 # Makefile, e.g. in cases of missing configure time
24 # dependencies. This is considered a best practice by
25 # cpantesters.org. See:
26 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
27 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
28 [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
29
30 if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
31 . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh
32 # Use find since there can be a Makefile generated for each Makefile.PL
33 for f in `find -name Makefile.PL`; do
34 f2=`echo $f | sed -e 's/.PL//'`
35 test -f $f2 || continue
36 sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
37 -e 's/perl.real/perl/' \
38 -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
39 $f2
40 done
41 fi
42}
43
44do_configure_append_class-target() {
45 find . -name Makefile | xargs sed -E -i \
46 -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
47}
48
49do_configure_append_class-nativesdk() {
50 find . -name Makefile | xargs sed -E -i \
51 -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
52}
53
54cpan_do_compile () {
55 oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
56}
57
58cpan_do_install () {
59 oe_runmake DESTDIR="${D}" install_vendor
60 for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
61 sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
62 done
63}
64
65EXPORT_FUNCTIONS do_configure do_compile do_install
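
For illustration, a recipe for a Makefile.PL-based CPAN module can be as small as the sketch below; the module name, URI and flag value are hypothetical/illustrative:

SUMMARY = "Example Perl module built with Makefile.PL"
LICENSE = "Artistic-1.0 | GPL-1.0-or-later"
SRC_URI = "https://cpan.example.org/authors/Foo-Bar-1.00.tar.gz"
S = "${WORKDIR}/Foo-Bar-${PV}"

inherit cpan

# Extra arguments appended to the 'perl Makefile.PL' call above, if needed
EXTRA_CPANFLAGS = "EXPAT_LIBPATH=${STAGING_LIBDIR}"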
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
deleted file mode 100644
index f3fb4666ef..0000000000
--- a/meta/classes/cpan_build.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
1#
2# This is for perl modules that use the new Build.PL build system
3#
4inherit cpan-base perlnative
5
6EXTRA_CPAN_BUILD_FLAGS ?= ""
7
8# Env var which tells perl if it should use host (no) or target (yes) settings
9export PERLCONFIGTARGET = "${@is_target(d)}"
10export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
11export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
12export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
13export LD = "${CCLD}"
14
15cpan_build_do_configure () {
16 if [ "${@is_target(d)}" = "yes" ]; then
17 # build for target
18 . ${STAGING_LIBDIR}/perl5/config.sh
19 fi
20
21 perl Build.PL --installdirs vendor --destdir ${D} \
22 ${EXTRA_CPAN_BUILD_FLAGS}
23
24 # Build.PLs can exit with success without generating a
25 # Build, e.g. in cases of missing configure time
26 # dependencies. This is considered a best practice by
27 # cpantesters.org. See:
28 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
29 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
30 [ -e Build ] || bbfatal "No Build was generated by Build.PL"
31}
32
33cpan_build_do_compile () {
34 perl Build --perl "${bindir}/perl" verbose=1
35}
36
37cpan_build_do_install () {
38 perl Build install --destdir ${D}
39}
40
41EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/create-spdx-2.2.bbclass b/meta/classes/create-spdx-2.2.bbclass
new file mode 100644
index 0000000000..486efadba9
--- /dev/null
+++ b/meta/classes/create-spdx-2.2.bbclass
@@ -0,0 +1,1158 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6
7DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx"
8
9# The product name that the CVE database uses. Defaults to BPN, but may need to
10# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
11CVE_PRODUCT ??= "${BPN}"
12CVE_VERSION ??= "${PV}"
13
14SPDXDIR ??= "${WORKDIR}/spdx"
15SPDXDEPLOY = "${SPDXDIR}/deploy"
16SPDXWORK = "${SPDXDIR}/work"
17SPDXIMAGEWORK = "${SPDXDIR}/image-work"
18SPDXSDKWORK = "${SPDXDIR}/sdk-work"
19SPDXDEPS = "${SPDXDIR}/deps.json"
20
21SPDX_TOOL_NAME ??= "oe-spdx-creator"
22SPDX_TOOL_VERSION ??= "1.0"
23
24SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
25
26SPDX_INCLUDE_SOURCES ??= "0"
27SPDX_ARCHIVE_SOURCES ??= "0"
28SPDX_ARCHIVE_PACKAGED ??= "0"
29
30SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
31SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
32SPDX_PRETTY ??= "0"
33
34SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
35
36SPDX_CUSTOM_ANNOTATION_VARS ??= ""
37
38SPDX_ORG ??= "OpenEmbedded ()"
39SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
40SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
41    this recipe. For SPDX documents created using this class during the build, this \
42 is the contact information for the person or organization who is doing the \
43 build."
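
For illustration, these knobs are normally set from configuration metadata; a minimal local.conf sketch, assuming the create-spdx wrapper class is used to pull this class in (values are examples only):

INHERIT += "create-spdx"
SPDX_PRETTY = "1"
SPDX_INCLUDE_SOURCES = "1"
SPDX_ORG = "Example Org"
SPDX_SUPPLIER = "Organization: Example Org"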
44
45def extract_licenses(filename):
46 import re
47
48 lic_regex = re.compile(rb'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
49
50 try:
51 with open(filename, 'rb') as f:
52 size = min(15000, os.stat(filename).st_size)
53 txt = f.read(size)
54 licenses = re.findall(lic_regex, txt)
55 if licenses:
56 ascii_licenses = [lic.decode('ascii') for lic in licenses]
57 return ascii_licenses
58 except Exception as e:
59 bb.warn(f"Exception reading {filename}: {e}")
60 return None
61
62def get_doc_namespace(d, doc):
63 import uuid
64 namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
65 return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
66
67def create_annotation(d, comment):
68 from datetime import datetime, timezone
69
70 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
71 annotation = oe.spdx.SPDXAnnotation()
72 annotation.annotationDate = creation_time
73 annotation.annotationType = "OTHER"
74 annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
75 annotation.comment = comment
76 return annotation
77
78def recipe_spdx_is_native(d, recipe):
79 return any(a.annotationType == "OTHER" and
80 a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
81 a.comment == "isNative" for a in recipe.annotations)
82
83def is_work_shared_spdx(d):
84 return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
85
86def get_json_indent(d):
87 if d.getVar("SPDX_PRETTY") == "1":
88 return 2
89 return None
90
91python() {
92 import json
93 if d.getVar("SPDX_LICENSE_DATA"):
94 return
95
96 with open(d.getVar("SPDX_LICENSES"), "r") as f:
97 data = json.load(f)
98 # Transform the license array to a dictionary
99 data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
100 d.setVar("SPDX_LICENSE_DATA", data)
101}
102
103def convert_license_to_spdx(lic, document, d, existing={}):
104 from pathlib import Path
105 import oe.spdx
106
107 license_data = d.getVar("SPDX_LICENSE_DATA")
108 extracted = {}
109
110 def add_extracted_license(ident, name):
111 nonlocal document
112
113 if name in extracted:
114 return
115
116 extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
117 extracted_info.name = name
118 extracted_info.licenseId = ident
119 extracted_info.extractedText = None
120
121 if name == "PD":
122 # Special-case this.
123 extracted_info.extractedText = "Software released to the public domain"
124 else:
125            # Search for the license in COMMON_LICENSE_DIR and LICENSE_PATH
126 for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
127 try:
128 with (Path(directory) / name).open(errors="replace") as f:
129 extracted_info.extractedText = f.read()
130 break
131 except FileNotFoundError:
132 pass
133 if extracted_info.extractedText is None:
134 # If it's not SPDX or PD, then NO_GENERIC_LICENSE must be set
135 filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
136 if filename:
137 filename = d.expand("${S}/" + filename)
138 with open(filename, errors="replace") as f:
139 extracted_info.extractedText = f.read()
140 else:
141 bb.fatal("Cannot find any text for license %s" % name)
142
143 extracted[name] = extracted_info
144 document.hasExtractedLicensingInfos.append(extracted_info)
145
146 def convert(l):
147 if l == "(" or l == ")":
148 return l
149
150 if l == "&":
151 return "AND"
152
153 if l == "|":
154 return "OR"
155
156 if l == "CLOSED":
157 return "NONE"
158
159 spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
160 if spdx_license in license_data["licenses"]:
161 return spdx_license
162
163 try:
164 spdx_license = existing[l]
165 except KeyError:
166 spdx_license = "LicenseRef-" + l
167 add_extracted_license(spdx_license, l)
168
169 return spdx_license
170
171 lic_split = lic.replace("(", " ( ").replace(")", " ) ").replace("|", " | ").replace("&", " & ").split()
172
173 return ' '.join(convert(l) for l in lic_split)
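
For example, under this conversion a recipe LICENSE of "GPL-2.0-only & (MIT | CLOSED)" becomes the SPDX expression "GPL-2.0-only AND (MIT OR NONE)", while a name that is not in the SPDX license list (and not covered by SPDXLICENSEMAP) is emitted as "LicenseRef-<name>" with a matching extracted-licensing-info entry appended to the document.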
174
175def process_sources(d):
176 pn = d.getVar('PN')
177 assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
178 if pn in assume_provided:
179 for p in d.getVar("PROVIDES").split():
180 if p != pn:
181 pn = p
182 break
183
184 # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
185 # so avoid archiving source here.
186 if pn.startswith('glibc-locale'):
187 return False
188 if d.getVar('PN') == "libtool-cross":
189 return False
190 if d.getVar('PN') == "libgcc-initial":
191 return False
192 if d.getVar('PN') == "shadow-sysroot":
193 return False
194
195 # We just archive gcc-source for all the gcc related recipes
196 if d.getVar('BPN') in ['gcc', 'libgcc']:
197        bb.debug(1, 'spdx: There is a bug in the scan of %s, do nothing' % pn)
198 return False
199
200 return True
201
202
203def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
204 from pathlib import Path
205 import oe.spdx
206 import hashlib
207
208 source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
209 if source_date_epoch:
210 source_date_epoch = int(source_date_epoch)
211
212 sha1s = []
213 spdx_files = []
214
215 file_counter = 1
216 for subdir, dirs, files in os.walk(topdir):
217 dirs[:] = [d for d in dirs if d not in ignore_dirs]
218 if subdir == str(topdir):
219 dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
220
221 for file in files:
222 filepath = Path(subdir) / file
223 filename = str(filepath.relative_to(topdir))
224
225 if not filepath.is_symlink() and filepath.is_file():
226 spdx_file = oe.spdx.SPDXFile()
227 spdx_file.SPDXID = get_spdxid(file_counter)
228 for t in get_types(filepath):
229 spdx_file.fileTypes.append(t)
230 spdx_file.fileName = filename
231
232 if archive is not None:
233 with filepath.open("rb") as f:
234 info = archive.gettarinfo(fileobj=f)
235 info.name = filename
236 info.uid = 0
237 info.gid = 0
238 info.uname = "root"
239 info.gname = "root"
240
241 if source_date_epoch is not None and info.mtime > source_date_epoch:
242 info.mtime = source_date_epoch
243
244 archive.addfile(info, f)
245
246 sha1 = bb.utils.sha1_file(filepath)
247 sha1s.append(sha1)
248 spdx_file.checksums.append(oe.spdx.SPDXChecksum(
249 algorithm="SHA1",
250 checksumValue=sha1,
251 ))
252 spdx_file.checksums.append(oe.spdx.SPDXChecksum(
253 algorithm="SHA256",
254 checksumValue=bb.utils.sha256_file(filepath),
255 ))
256
257 if "SOURCE" in spdx_file.fileTypes:
258 extracted_lics = extract_licenses(filepath)
259 if extracted_lics:
260 spdx_file.licenseInfoInFiles = extracted_lics
261
262 doc.files.append(spdx_file)
263 doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
264 spdx_pkg.hasFiles.append(spdx_file.SPDXID)
265
266 spdx_files.append(spdx_file)
267
268 file_counter += 1
269
270 sha1s.sort()
271 verifier = hashlib.sha1()
272 for v in sha1s:
273 verifier.update(v.encode("utf-8"))
274 spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
275
276 return spdx_files
277
278
279def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
280 from pathlib import Path
281 import hashlib
282 import oe.packagedata
283 import oe.spdx
284
285 debug_search_paths = [
286 Path(d.getVar('PKGD')),
287 Path(d.getVar('STAGING_DIR_TARGET')),
288 Path(d.getVar('STAGING_DIR_NATIVE')),
289 Path(d.getVar('STAGING_KERNEL_DIR')),
290 ]
291
292 pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
293
294 if pkg_data is None:
295 return
296
297 for file_path, file_data in pkg_data["files_info"].items():
298 if not "debugsrc" in file_data:
299 continue
300
301 for pkg_file in package_files:
302 if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
303 break
304 else:
305 bb.fatal("No package file found for %s in %s; SPDX found: %s" % (str(file_path), package,
306 " ".join(p.fileName for p in package_files)))
307 continue
308
309 for debugsrc in file_data["debugsrc"]:
310 ref_id = "NOASSERTION"
311 for search in debug_search_paths:
312 if debugsrc.startswith("/usr/src/kernel"):
313 debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
314 else:
315 debugsrc_path = search / debugsrc.lstrip("/")
316 if not debugsrc_path.exists():
317 continue
318
319 file_sha256 = bb.utils.sha256_file(debugsrc_path)
320
321 if file_sha256 in sources:
322 source_file = sources[file_sha256]
323
324 doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
325 if doc_ref is None:
326 doc_ref = oe.spdx.SPDXExternalDocumentRef()
327 doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
328 doc_ref.spdxDocument = source_file.doc.documentNamespace
329 doc_ref.checksum.algorithm = "SHA1"
330 doc_ref.checksum.checksumValue = source_file.doc_sha1
331 package_doc.externalDocumentRefs.append(doc_ref)
332
333 ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
334 else:
335 bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
336 break
337 else:
338 bb.debug(1, "Debug source %s not found" % debugsrc)
339
340 package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
341
342add_package_sources_from_debug[vardepsexclude] += "STAGING_KERNEL_DIR"
343
344def collect_dep_recipes(d, doc, spdx_recipe):
345 import json
346 from pathlib import Path
347 import oe.sbom
348 import oe.spdx
349
350 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
351 spdx_deps_file = Path(d.getVar("SPDXDEPS"))
352 package_archs = d.getVar("SSTATE_ARCHS").split()
353 package_archs.reverse()
354
355 dep_recipes = []
356
357 with spdx_deps_file.open("r") as f:
358 deps = json.load(f)
359
360 for dep_pn, dep_hashfn in deps:
361 dep_recipe_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, "recipe-" + dep_pn, dep_hashfn)
362 if not dep_recipe_path:
363 bb.fatal("Cannot find any SPDX file for recipe %s, %s" % (dep_pn, dep_hashfn))
364
365 spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
366
367 for pkg in spdx_dep_doc.packages:
368 if pkg.name == dep_pn:
369 spdx_dep_recipe = pkg
370 break
371 else:
372 continue
373
374 dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
375
376 dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
377 dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
378 dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
379 dep_recipe_ref.checksum.algorithm = "SHA1"
380 dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
381
382 doc.externalDocumentRefs.append(dep_recipe_ref)
383
384 doc.add_relationship(
385 "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
386 "BUILD_DEPENDENCY_OF",
387 spdx_recipe
388 )
389
390 return dep_recipes
391
392collect_dep_recipes[vardepsexclude] = "SSTATE_ARCHS"
393
394def collect_dep_sources(d, dep_recipes):
395 import oe.sbom
396
397 sources = {}
398 for dep in dep_recipes:
399 # Don't collect sources from native recipes as they
400 # match non-native sources also.
401 if recipe_spdx_is_native(d, dep.recipe):
402 continue
403 recipe_files = set(dep.recipe.hasFiles)
404
405 for spdx_file in dep.doc.files:
406 if spdx_file.SPDXID not in recipe_files:
407 continue
408
409 if "SOURCE" in spdx_file.fileTypes:
410 for checksum in spdx_file.checksums:
411 if checksum.algorithm == "SHA256":
412 sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
413 break
414
415 return sources
416
417def add_download_packages(d, doc, recipe):
418 import os.path
419 from bb.fetch2 import decodeurl, CHECKSUM_LIST
420 import bb.process
421 import oe.spdx
422 import oe.sbom
423
424 for download_idx, src_uri in enumerate(d.getVar('SRC_URI').split()):
425 f = bb.fetch2.FetchData(src_uri, d)
426
427 for name in f.names:
428 package = oe.spdx.SPDXPackage()
429 package.name = "%s-source-%d" % (d.getVar("PN"), download_idx + 1)
430 package.SPDXID = oe.sbom.get_download_spdxid(d, download_idx + 1)
431
432 if f.type == "file":
433 continue
434
435 uri = f.type
436 proto = getattr(f, "proto", None)
437 if proto is not None:
438 uri = uri + "+" + proto
439 uri = uri + "://" + f.host + f.path
440
441 if f.method.supports_srcrev():
442 uri = uri + "@" + f.revisions[name]
443
444 if f.method.supports_checksum(f):
445 for checksum_id in CHECKSUM_LIST:
446 if checksum_id.upper() not in oe.spdx.SPDXPackage.ALLOWED_CHECKSUMS:
447 continue
448
449 expected_checksum = getattr(f, "%s_expected" % checksum_id)
450 if expected_checksum is None:
451 continue
452
453 c = oe.spdx.SPDXChecksum()
454 c.algorithm = checksum_id.upper()
455 c.checksumValue = expected_checksum
456 package.checksums.append(c)
457
458 package.downloadLocation = uri
459 doc.packages.append(package)
460 doc.add_relationship(doc, "DESCRIBES", package)
461 # In the future, we might be able to do more fancy dependencies,
462 # but this should be sufficient for now
463 doc.add_relationship(package, "BUILD_DEPENDENCY_OF", recipe)
464
465def collect_direct_deps(d, dep_task):
466 current_task = "do_" + d.getVar("BB_CURRENTTASK")
467 pn = d.getVar("PN")
468
469 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
470
471 for this_dep in taskdepdata.values():
472 if this_dep[0] == pn and this_dep[1] == current_task:
473 break
474 else:
475 bb.fatal(f"Unable to find this {pn}:{current_task} in taskdepdata")
476
477 deps = set()
478 for dep_name in this_dep[3]:
479 dep_data = taskdepdata[dep_name]
480 if dep_data[1] == dep_task and dep_data[0] != pn:
481 deps.add((dep_data[0], dep_data[7]))
482
483 return sorted(deps)
484
485collect_direct_deps[vardepsexclude] += "BB_TASKDEPDATA"
486collect_direct_deps[vardeps] += "DEPENDS"
487
488python do_collect_spdx_deps() {
489 # This task calculates the build time dependencies of the recipe, and is
490 # required because while a task can deptask on itself, those dependencies
491 # do not show up in BB_TASKDEPDATA. To work around that, this task does the
492 # deptask on do_create_spdx and writes out the dependencies it finds, then
493 # do_create_spdx reads in the found dependencies when writing the actual
494 # SPDX document
495 import json
496 from pathlib import Path
497
498 spdx_deps_file = Path(d.getVar("SPDXDEPS"))
499
500 deps = collect_direct_deps(d, "do_create_spdx")
501
502 with spdx_deps_file.open("w") as f:
503 json.dump(deps, f)
504}
505# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
506addtask do_collect_spdx_deps after do_unpack
507do_collect_spdx_deps[depends] += "${PATCHDEPENDENCY}"
508do_collect_spdx_deps[deptask] = "do_create_spdx"
509do_collect_spdx_deps[dirs] = "${SPDXDIR}"
510
511python do_create_spdx() {
512 from datetime import datetime, timezone
513 import oe.sbom
514 import oe.spdx
515 import uuid
516 from pathlib import Path
517 from contextlib import contextmanager
518 import oe.cve_check
519
520 @contextmanager
521 def optional_tarfile(name, guard, mode="w"):
522 import tarfile
523 import bb.compress.zstd
524
525 num_threads = int(d.getVar("BB_NUMBER_THREADS"))
526
527 if guard:
528 name.parent.mkdir(parents=True, exist_ok=True)
529 with bb.compress.zstd.open(name, mode=mode + "b", num_threads=num_threads) as f:
530 with tarfile.open(fileobj=f, mode=mode + "|") as tf:
531 yield tf
532 else:
533 yield None
534
535
536 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
537 spdx_workdir = Path(d.getVar("SPDXWORK"))
538 include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
539 archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
540 archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
541 pkg_arch = d.getVar("SSTATE_PKGARCH")
542
543 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
544
545 doc = oe.spdx.SPDXDocument()
546
547 doc.name = "recipe-" + d.getVar("PN")
548 doc.documentNamespace = get_doc_namespace(d, doc)
549 doc.creationInfo.created = creation_time
550 doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
551 doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
552 doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
553 doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
554 doc.creationInfo.creators.append("Person: N/A ()")
555
556 recipe = oe.spdx.SPDXPackage()
557 recipe.name = d.getVar("PN")
558 recipe.versionInfo = d.getVar("PV")
559 recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
560 recipe.supplier = d.getVar("SPDX_SUPPLIER")
561 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
562 recipe.annotations.append(create_annotation(d, "isNative"))
563
564 homepage = d.getVar("HOMEPAGE")
565 if homepage:
566 recipe.homepage = homepage
567
568 license = d.getVar("LICENSE")
569 if license:
570 recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
571
572 summary = d.getVar("SUMMARY")
573 if summary:
574 recipe.summary = summary
575
576 description = d.getVar("DESCRIPTION")
577 if description:
578 recipe.description = description
579
580 if d.getVar("SPDX_CUSTOM_ANNOTATION_VARS"):
581 for var in d.getVar('SPDX_CUSTOM_ANNOTATION_VARS').split():
582 recipe.annotations.append(create_annotation(d, var + "=" + d.getVar(var)))
583
584 # Some CVEs may be patched during the build process without incrementing the version number,
585 # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
586 # save the CVEs fixed by patches to source information field in the SPDX.
587 patched_cves = oe.cve_check.get_patched_cves(d)
588 patched_cves = list(patched_cves)
589 patched_cves = ' '.join(patched_cves)
590 if patched_cves:
591 recipe.sourceInfo = "CVEs fixed: " + patched_cves
592
593 cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
594 if cpe_ids:
595 for cpe_id in cpe_ids:
596 cpe = oe.spdx.SPDXExternalReference()
597 cpe.referenceCategory = "SECURITY"
598 cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
599 cpe.referenceLocator = cpe_id
600 recipe.externalRefs.append(cpe)
601
602 doc.packages.append(recipe)
603 doc.add_relationship(doc, "DESCRIBES", recipe)
604
605 add_download_packages(d, doc, recipe)
606
607 if process_sources(d) and include_sources:
608 recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.zst")
609 with optional_tarfile(recipe_archive, archive_sources) as archive:
610 spdx_get_src(d)
611
612 add_package_files(
613 d,
614 doc,
615 recipe,
616 spdx_workdir,
617 lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
618 lambda filepath: ["SOURCE"],
619 ignore_dirs=[".git"],
620 ignore_top_level_dirs=["temp"],
621 archive=archive,
622 )
623
624 if archive is not None:
625 recipe.packageFileName = str(recipe_archive.name)
626
627 dep_recipes = collect_dep_recipes(d, doc, recipe)
628
629 doc_sha1 = oe.sbom.write_doc(d, doc, pkg_arch, "recipes", indent=get_json_indent(d))
630 dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
631
632 recipe_ref = oe.spdx.SPDXExternalDocumentRef()
633 recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
634 recipe_ref.spdxDocument = doc.documentNamespace
635 recipe_ref.checksum.algorithm = "SHA1"
636 recipe_ref.checksum.checksumValue = doc_sha1
637
638 sources = collect_dep_sources(d, dep_recipes)
639 found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
640
641 if not recipe_spdx_is_native(d, recipe):
642 bb.build.exec_func("read_subpackage_metadata", d)
643
644 pkgdest = Path(d.getVar("PKGDEST"))
645 for package in d.getVar("PACKAGES").split():
646 if not oe.packagedata.packaged(package, d):
647 continue
648
649 package_doc = oe.spdx.SPDXDocument()
650 pkg_name = d.getVar("PKG:%s" % package) or package
651 package_doc.name = pkg_name
652 package_doc.documentNamespace = get_doc_namespace(d, package_doc)
653 package_doc.creationInfo.created = creation_time
654 package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
655 package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
656 package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
657 package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
658 package_doc.creationInfo.creators.append("Person: N/A ()")
659 package_doc.externalDocumentRefs.append(recipe_ref)
660
661 package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
662
663 spdx_package = oe.spdx.SPDXPackage()
664
665 spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
666 spdx_package.name = pkg_name
667 spdx_package.versionInfo = d.getVar("PV")
668 spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
669 spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
670
671 package_doc.packages.append(spdx_package)
672
673 package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
674 package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
675
676 package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.zst")
677 with optional_tarfile(package_archive, archive_packaged) as archive:
678 package_files = add_package_files(
679 d,
680 package_doc,
681 spdx_package,
682 pkgdest / package,
683 lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
684 lambda filepath: ["BINARY"],
685 ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
686 archive=archive,
687 )
688
689 if archive is not None:
690 spdx_package.packageFileName = str(package_archive.name)
691
692 add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
693
694 oe.sbom.write_doc(d, package_doc, pkg_arch, "packages", indent=get_json_indent(d))
695}
696do_create_spdx[vardepsexclude] += "BB_NUMBER_THREADS"
697# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
698addtask do_create_spdx after do_package do_packagedata do_unpack do_collect_spdx_deps before do_populate_sdk do_build do_rm_work
699
700SSTATETASKS += "do_create_spdx"
701do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
702do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
703
704python do_create_spdx_setscene () {
705 sstate_setscene(d)
706}
707addtask do_create_spdx_setscene
708
709do_create_spdx[dirs] = "${SPDXWORK}"
710do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
711do_create_spdx[depends] += "${PATCHDEPENDENCY}"
712
713def collect_package_providers(d):
714 from pathlib import Path
715 import oe.sbom
716 import oe.spdx
717 import json
718
719 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
720
721 providers = {}
722
723 deps = collect_direct_deps(d, "do_create_spdx")
724 deps.append((d.getVar("PN"), d.getVar("BB_HASHFILENAME")))
725
726 for dep_pn, dep_hashfn in deps:
727 localdata = d
728 recipe_data = oe.packagedata.read_pkgdata(dep_pn, localdata)
729 if not recipe_data:
730 localdata = bb.data.createCopy(d)
731 localdata.setVar("PKGDATA_DIR", "${PKGDATA_DIR_SDK}")
732 recipe_data = oe.packagedata.read_pkgdata(dep_pn, localdata)
733
734 for pkg in recipe_data.get("PACKAGES", "").split():
735
736 pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, localdata)
737 rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
738 rprovides.add(pkg)
739
740 if "PKG" in pkg_data:
741 pkg = pkg_data["PKG"]
742 rprovides.add(pkg)
743
744 for r in rprovides:
745 providers[r] = (pkg, dep_hashfn)
746
747 return providers
748
749collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
750
751python do_create_runtime_spdx() {
752 from datetime import datetime, timezone
753 import oe.sbom
754 import oe.spdx
755 import oe.packagedata
756 from pathlib import Path
757
758 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
759 spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
760 is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
761
762 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
763
764 providers = collect_package_providers(d)
765 pkg_arch = d.getVar("SSTATE_PKGARCH")
766 package_archs = d.getVar("SSTATE_ARCHS").split()
767 package_archs.reverse()
768
769 if not is_native:
770 bb.build.exec_func("read_subpackage_metadata", d)
771
772 dep_package_cache = {}
773
774 pkgdest = Path(d.getVar("PKGDEST"))
775 for package in d.getVar("PACKAGES").split():
776 localdata = bb.data.createCopy(d)
777 pkg_name = d.getVar("PKG:%s" % package) or package
778 localdata.setVar("PKG", pkg_name)
779 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
780
781 if not oe.packagedata.packaged(package, localdata):
782 continue
783
784 pkg_spdx_path = oe.sbom.doc_path(deploy_dir_spdx, pkg_name, pkg_arch, "packages")
785
786 package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
787
788 for p in package_doc.packages:
789 if p.name == pkg_name:
790 spdx_package = p
791 break
792 else:
793 bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
794
795 runtime_doc = oe.spdx.SPDXDocument()
796 runtime_doc.name = "runtime-" + pkg_name
797 runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
798 runtime_doc.creationInfo.created = creation_time
799 runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
800 runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
801 runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
802 runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
803 runtime_doc.creationInfo.creators.append("Person: N/A ()")
804
805 package_ref = oe.spdx.SPDXExternalDocumentRef()
806 package_ref.externalDocumentId = "DocumentRef-package-" + package
807 package_ref.spdxDocument = package_doc.documentNamespace
808 package_ref.checksum.algorithm = "SHA1"
809 package_ref.checksum.checksumValue = package_doc_sha1
810
811 runtime_doc.externalDocumentRefs.append(package_ref)
812
813 runtime_doc.add_relationship(
814 runtime_doc.SPDXID,
815 "AMENDS",
816 "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
817 )
818
819 deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
820 seen_deps = set()
821 for dep, _ in deps.items():
822 if dep in seen_deps:
823 continue
824
825 if dep not in providers:
826 continue
827
828 (dep, dep_hashfn) = providers[dep]
829
830 if not oe.packagedata.packaged(dep, localdata):
831 continue
832
833 dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
834 dep_pkg = dep_pkg_data["PKG"]
835
836 if dep in dep_package_cache:
837 (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
838 else:
839 dep_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, dep_pkg, dep_hashfn)
840 if not dep_path:
841 bb.fatal("No SPDX file found for package %s, %s" % (dep_pkg, dep_hashfn))
842
843 spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
844
845 for pkg in spdx_dep_doc.packages:
846 if pkg.name == dep_pkg:
847 dep_spdx_package = pkg
848 break
849 else:
850 bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
851
852 dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
853 dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
854 dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
855 dep_package_ref.checksum.algorithm = "SHA1"
856 dep_package_ref.checksum.checksumValue = spdx_dep_sha1
857
858 dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
859
860 runtime_doc.externalDocumentRefs.append(dep_package_ref)
861
862 runtime_doc.add_relationship(
863 "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
864 "RUNTIME_DEPENDENCY_OF",
865 "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
866 )
867 seen_deps.add(dep)
868
869 oe.sbom.write_doc(d, runtime_doc, pkg_arch, "runtime", spdx_deploy, indent=get_json_indent(d))
870}
871
872do_create_runtime_spdx[vardepsexclude] += "OVERRIDES SSTATE_ARCHS"
873
874addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
875SSTATETASKS += "do_create_runtime_spdx"
876do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
877do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
878
879python do_create_runtime_spdx_setscene () {
880 sstate_setscene(d)
881}
882addtask do_create_runtime_spdx_setscene
883
884do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
885do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
886do_create_runtime_spdx[rdeptask] = "do_create_spdx"
887
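The wiring above is the standard pattern for making a deploy-style task shareable through sstate: stage output in a private directory, declare the task in SSTATETASKS, and add a matching setscene task. Reduced to a hypothetical do_deploy_foo task (all names below are illustrative and not part of this class), the same pattern looks roughly like this:

FOODEPLOYDIR = "${WORKDIR}/deploy-foo"

python do_deploy_foo() {
    # Write output only into the task's private staging directory;
    # sstate moves it into the shared location afterwards.
    bb.utils.mkdirhier(d.getVar("FOODEPLOYDIR"))
}
addtask do_deploy_foo after do_compile before do_build

SSTATETASKS += "do_deploy_foo"
do_deploy_foo[dirs] = "${FOODEPLOYDIR}"
do_deploy_foo[cleandirs] = "${FOODEPLOYDIR}"
do_deploy_foo[sstate-inputdirs] = "${FOODEPLOYDIR}"
do_deploy_foo[sstate-outputdirs] = "${DEPLOY_DIR}/foo"

python do_deploy_foo_setscene() {
    sstate_setscene(d)
}
addtask do_deploy_foo_setscene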
888def spdx_get_src(d):
889 """
890 save patched source of the recipe in SPDX_WORKDIR.
891 """
892 import shutil
893 spdx_workdir = d.getVar('SPDXWORK')
894 spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
895 pn = d.getVar('PN')
896
897 workdir = d.getVar("WORKDIR")
898
899 try:
900        # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
901 if not is_work_shared_spdx(d):
902 # Change the WORKDIR to make do_unpack do_patch run in another dir.
903 d.setVar('WORKDIR', spdx_workdir)
904 # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
905 d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
906
907            # Changing 'WORKDIR' also changes 'B', so create the 'B' directory in
908            # case later tasks need it (for example, some recipes' do_patch
909            # requires 'B' to exist).
910 bb.utils.mkdirhier(d.getVar('B'))
911
912 bb.build.exec_func('do_unpack', d)
913 # Copy source of kernel to spdx_workdir
914 if is_work_shared_spdx(d):
915 share_src = d.getVar('WORKDIR')
916 d.setVar('WORKDIR', spdx_workdir)
917 d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
918 src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
919 bb.utils.mkdirhier(src_dir)
920 if bb.data.inherits_class('kernel',d):
921 share_src = d.getVar('STAGING_KERNEL_DIR')
922 cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
923 cmd_copy_shared_res = os.popen(cmd_copy_share).read()
924 bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
925
926 git_path = src_dir + "/.git"
927 if os.path.exists(git_path):
928                shutil.rmtree(git_path)
929
930 # Make sure gcc and kernel sources are patched only once
931 if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
932 bb.build.exec_func('do_patch', d)
933
934        # Some userland recipes have no source.
935 if not os.path.exists( spdx_workdir ):
936 bb.utils.mkdirhier(spdx_workdir)
937 finally:
938 d.setVar("WORKDIR", workdir)
939
940spdx_get_src[vardepsexclude] += "STAGING_KERNEL_DIR"
941
942do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
943do_rootfs[cleandirs] += "${SPDXIMAGEWORK}"
944
945ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx"
946
947do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
948do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
949POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx"
950POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx"
951
952python image_combine_spdx() {
953 import os
954 import oe.sbom
955 from pathlib import Path
956 from oe.rootfs import image_list_installed_packages
957
958 image_name = d.getVar("IMAGE_NAME")
959 image_link_name = d.getVar("IMAGE_LINK_NAME")
960 imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
961 img_spdxid = oe.sbom.get_image_spdxid(image_name)
962 packages = image_list_installed_packages(d)
963
964 combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages, Path(d.getVar("SPDXIMAGEWORK")))
965
966 def make_image_link(target_path, suffix):
967 if image_link_name:
968 link = imgdeploydir / (image_link_name + suffix)
969 if link != target_path:
970 link.symlink_to(os.path.relpath(target_path, link.parent))
971
972 spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.zst")
973 make_image_link(spdx_tar_path, ".spdx.tar.zst")
974}
975
976python sdk_host_combine_spdx() {
977 sdk_combine_spdx(d, "host")
978}
979
980python sdk_target_combine_spdx() {
981 sdk_combine_spdx(d, "target")
982}
983
984def sdk_combine_spdx(d, sdk_type):
985 import oe.sbom
986 from pathlib import Path
987 from oe.sdk import sdk_list_installed_packages
988
989 sdk_name = d.getVar("TOOLCHAIN_OUTPUTNAME") + "-" + sdk_type
990 sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
991 sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
992 sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
993 combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages, Path(d.getVar('SPDXSDKWORK')))
994
995def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages, spdx_workdir):
996 import os
997 import oe.spdx
998 import oe.sbom
999 import io
1000 import json
1001 from datetime import timezone, datetime
1002 from pathlib import Path
1003 import tarfile
1004 import bb.compress.zstd
1005
1006 providers = collect_package_providers(d)
1007 package_archs = d.getVar("SSTATE_ARCHS").split()
1008 package_archs.reverse()
1009
1010 creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
1011 deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
1012 source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
1013
1014 doc = oe.spdx.SPDXDocument()
1015 doc.name = rootfs_name
1016 doc.documentNamespace = get_doc_namespace(d, doc)
1017 doc.creationInfo.created = creation_time
1018 doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
1019 doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
1020 doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
1021 doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
1022 doc.creationInfo.creators.append("Person: N/A ()")
1023
1024 image = oe.spdx.SPDXPackage()
1025 image.name = d.getVar("PN")
1026 image.versionInfo = d.getVar("PV")
1027 image.SPDXID = rootfs_spdxid
1028 image.supplier = d.getVar("SPDX_SUPPLIER")
1029
1030 doc.packages.append(image)
1031
1032 for name in sorted(packages.keys()):
1033 if name not in providers:
1034 bb.fatal("Unable to find SPDX provider for '%s'" % name)
1035
1036 pkg_name, pkg_hashfn = providers[name]
1037
1038 pkg_spdx_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, pkg_name, pkg_hashfn)
1039 if not pkg_spdx_path:
1040 bb.fatal("No SPDX file found for package %s, %s" % (pkg_name, pkg_hashfn))
1041
1042 pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
1043
1044 for p in pkg_doc.packages:
1045 if p.name == name:
1046 pkg_ref = oe.spdx.SPDXExternalDocumentRef()
1047 pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
1048 pkg_ref.spdxDocument = pkg_doc.documentNamespace
1049 pkg_ref.checksum.algorithm = "SHA1"
1050 pkg_ref.checksum.checksumValue = pkg_doc_sha1
1051
1052 doc.externalDocumentRefs.append(pkg_ref)
1053 doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
1054 break
1055 else:
1056 bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
1057
1058 runtime_spdx_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, "runtime-" + name, pkg_hashfn)
1059 if not runtime_spdx_path:
1060 bb.fatal("No runtime SPDX document found for %s, %s" % (name, pkg_hashfn))
1061
1062 runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
1063
1064 runtime_ref = oe.spdx.SPDXExternalDocumentRef()
1065 runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
1066 runtime_ref.spdxDocument = runtime_doc.documentNamespace
1067 runtime_ref.checksum.algorithm = "SHA1"
1068 runtime_ref.checksum.checksumValue = runtime_doc_sha1
1069
1070 # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
1071 doc.externalDocumentRefs.append(runtime_ref)
1072 doc.add_relationship(
1073 image,
1074 "OTHER",
1075 "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
1076 comment="Runtime dependencies for %s" % name
1077 )
1078 bb.utils.mkdirhier(spdx_workdir)
1079 image_spdx_path = spdx_workdir / (rootfs_name + ".spdx.json")
1080
1081 with image_spdx_path.open("wb") as f:
1082 doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
1083
1084 num_threads = int(d.getVar("BB_NUMBER_THREADS"))
1085
1086 visited_docs = set()
1087
1088 index = {"documents": []}
1089
1090 spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.zst")
1091 with bb.compress.zstd.open(spdx_tar_path, "w", num_threads=num_threads) as f:
1092 with tarfile.open(fileobj=f, mode="w|") as tar:
1093 def collect_spdx_document(path):
1094 nonlocal tar
1095 nonlocal deploy_dir_spdx
1096 nonlocal source_date_epoch
1097 nonlocal index
1098
1099 if path in visited_docs:
1100 return
1101
1102 visited_docs.add(path)
1103
1104 with path.open("rb") as f:
1105 doc, sha1 = oe.sbom.read_doc(f)
1106 f.seek(0)
1107
1108 if doc.documentNamespace in visited_docs:
1109 return
1110
1111 bb.note("Adding SPDX document %s" % path)
1112 visited_docs.add(doc.documentNamespace)
1113 info = tar.gettarinfo(fileobj=f)
1114
1115 info.name = doc.name + ".spdx.json"
1116 info.uid = 0
1117 info.gid = 0
1118 info.uname = "root"
1119 info.gname = "root"
1120
1121 if source_date_epoch is not None and info.mtime > int(source_date_epoch):
1122 info.mtime = int(source_date_epoch)
1123
1124 tar.addfile(info, f)
1125
1126 index["documents"].append({
1127 "filename": info.name,
1128 "documentNamespace": doc.documentNamespace,
1129 "sha1": sha1,
1130 })
1131
1132 for ref in doc.externalDocumentRefs:
1133 ref_path = oe.sbom.doc_find_by_namespace(deploy_dir_spdx, package_archs, ref.spdxDocument)
1134 if not ref_path:
1135 bb.fatal("Cannot find any SPDX file for document %s" % ref.spdxDocument)
1136 collect_spdx_document(ref_path)
1137
1138 collect_spdx_document(image_spdx_path)
1139
1140 index["documents"].sort(key=lambda x: x["filename"])
1141
1142 index_str = io.BytesIO(json.dumps(
1143 index,
1144 sort_keys=True,
1145 indent=get_json_indent(d),
1146 ).encode("utf-8"))
1147
1148 info = tarfile.TarInfo()
1149 info.name = "index.json"
1150 info.size = len(index_str.getvalue())
1151 info.uid = 0
1152 info.gid = 0
1153 info.uname = "root"
1154 info.gname = "root"
1155
1156 tar.addfile(info, fileobj=index_str)
1157
1158combine_spdx[vardepsexclude] += "BB_NUMBER_THREADS SSTATE_ARCHS"
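For orientation, the index.json that combine_spdx() appends to the archive can be inspected with plain Python once the .spdx.tar.zst has been decompressed; a minimal sketch, where the image name is only an example:

import json
import tarfile

# Assumes "zstd -d core-image-minimal.spdx.tar.zst" has already been run;
# the image name is illustrative.
with tarfile.open("core-image-minimal.spdx.tar") as tar:
    with tar.extractfile("index.json") as f:
        index = json.load(f)

# Each entry carries the member file name, the SPDX document namespace and
# the SHA1 recorded when the document was collected.
for entry in index["documents"]:
    print(entry["filename"], entry["documentNamespace"], entry["sha1"])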
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..19c6c0ff0b
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,8 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6# Include this class when you don't care what version of SPDX you get; it will
7# be updated to the latest stable version that is supported
8inherit create-spdx-2.2
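In practice this wrapper is typically pulled in globally from local.conf or a distro configuration; a minimal sketch, where the optional variable is just one example of the knobs the underlying class honours:

# conf/local.conf
INHERIT += "create-spdx"

# optional: pretty-print the generated SPDX JSON documents
SPDX_PRETTY = "1"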
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
deleted file mode 100644
index f5c9f61595..0000000000
--- a/meta/classes/cross-canadian.bbclass
+++ /dev/null
@@ -1,194 +0,0 @@
1#
2# NOTE - When using this class the user is responsible for ensuring that
3# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
4# is changed, another nativesdk xxx-canadian-cross can be installed
5#
6
7
8# SDK packages are built either explicitly by the user,
9# or indirectly via dependency. No need to be in 'world'.
10EXCLUDE_FROM_WORLD = "1"
11NATIVESDKLIBC ?= "libc-glibc"
12LIBCOVERRIDE = ":${NATIVESDKLIBC}"
13CLASSOVERRIDE = "class-cross-canadian"
14STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
15
16#
17# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
18#
19PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
20BASECANADIANEXTRAOS ?= "linux-musl"
21CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
22CANADIANEXTRAVENDOR = ""
23MODIFYTOS ??= "1"
24python () {
25 archs = d.getVar('PACKAGE_ARCHS').split()
26 sdkarchs = []
27 for arch in archs:
28 sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
29 d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
30
31 # Allow the following code segment to be disabled, e.g. meta-environment
32 if d.getVar("MODIFYTOS") != "1":
33 return
34
35 if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]:
36 return
37
38 tos = d.getVar("TARGET_OS")
39 whitelist = []
40 extralibcs = [""]
41 if "musl" in d.getVar("BASECANADIANEXTRAOS"):
42 extralibcs.append("musl")
43 for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
44 for libc in extralibcs:
45 entry = "linux"
46 if variant and libc:
47 entry = entry + "-" + libc + variant
48 elif variant:
49 entry = entry + "-gnu" + variant
50 elif libc:
51 entry = entry + "-" + libc
52 whitelist.append(entry)
53 if tos not in whitelist:
54        bb.fatal("Building cross-canadian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
55
56 for n in ["PROVIDES", "DEPENDS"]:
57 d.setVar(n, d.getVar(n))
58 d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN"))
59 for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
60 n = prefix + "_FOR_TARGET"
61 d.setVar(n, d.getVar(n))
62 # This is a bit ugly. We need to zero LIBC/ABI extension which will change TARGET_OS
63 # however we need the old value in some variables. We expand those here first.
64 tarch = d.getVar("TARGET_ARCH")
65 if tarch == "x86_64":
66 d.setVar("LIBCEXTENSION", "")
67 d.setVar("ABIEXTENSION", "")
68 d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
69 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
70 d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
71 elif tarch == "powerpc":
72 # PowerPC can build "linux" and "linux-gnuspe"
73 d.setVar("LIBCEXTENSION", "")
74 d.setVar("ABIEXTENSION", "")
75 d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
76 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
77 d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
78 elif tarch == "mips64":
79 d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
80 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
81 d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
82 if tarch == "arm" or tarch == "armeb":
83 d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi")
84 d.setVar("TARGET_OS", "linux-gnueabi")
85 else:
86 d.setVar("TARGET_OS", "linux")
87
88 # Also need to handle multilib target vendors
89 vendors = d.getVar("CANADIANEXTRAVENDOR")
90 if not vendors:
91 vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
92 origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL")
93 if origvendor:
94 d.setVar("TARGET_VENDOR", origvendor)
95 if origvendor not in vendors.split():
96 vendors = origvendor + " " + vendors
97 d.setVar("CANADIANEXTRAVENDOR", vendors)
98}
99MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
100
101INHIBIT_DEFAULT_DEPS = "1"
102
103STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
104
105TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
106
107PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
108PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
109
110HOST_ARCH = "${SDK_ARCH}"
111HOST_VENDOR = "${SDK_VENDOR}"
112HOST_OS = "${SDK_OS}"
113HOST_PREFIX = "${SDK_PREFIX}"
114HOST_CC_ARCH = "${SDK_CC_ARCH}"
115HOST_LD_ARCH = "${SDK_LD_ARCH}"
116HOST_AS_ARCH = "${SDK_AS_ARCH}"
117
118#assign DPKG_ARCH
119DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
120
121CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
122CFLAGS = "${BUILDSDK_CFLAGS}"
123CXXFLAGS = "${BUILDSDK_CFLAGS}"
124LDFLAGS = "${BUILDSDK_LDFLAGS} \
125 -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
126 -Wl,-rpath,${libdir}/.. "
127
128#
129# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
130# binaries
131#
132DEPENDS_append = " chrpath-replacement-native"
133EXTRANATIVEPATH += "chrpath-native"
134
135# Path mangling needed by the cross packaging
136# Note that we use := here to ensure that libdir and includedir are
137# target paths.
138target_base_prefix := "${base_prefix}"
139target_prefix := "${prefix}"
140target_exec_prefix := "${exec_prefix}"
141target_base_libdir = "${target_base_prefix}/${baselib}"
142target_libdir = "${target_exec_prefix}/${baselib}"
143target_includedir := "${includedir}"
144
145# Change to place files in SDKPATH
146base_prefix = "${SDKPATHNATIVE}"
147prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
148exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
149bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
150sbindir = "${bindir}"
151base_bindir = "${bindir}"
152base_sbindir = "${bindir}"
153libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
154libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
155
156FILES_${PN} = "${prefix}"
157
158export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
159export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
160
161do_populate_sysroot[stamp-extra-info] = ""
162do_packagedata[stamp-extra-info] = ""
163
164USE_NLS = "${SDKUSE_NLS}"
165
166# We have to use TARGET_ARCH but we care about the absolute value
167# and not any particular tune that is enabled.
168TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
169
170PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
171# If MLPREFIX is set by multilib code, shlibs
172# points to the wrong place so force it
173SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
174SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
175
176cross_canadian_bindirlinks () {
177 for i in linux ${CANADIANEXTRAOS}
178 do
179 for v in ${CANADIANEXTRAVENDOR}
180 do
181 d=${D}${bindir}/../${TARGET_ARCH}$v-$i
182 if [ -d $d ];
183 then
184 continue
185 fi
186 install -d $d
187 for j in `ls ${D}${bindir}`
188 do
189 p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,`
190 ln -s ../${TARGET_SYS}/$j $d/$p
191 done
192 done
193 done
194}
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
deleted file mode 100644
index bfec91d043..0000000000
--- a/meta/classes/cross.bbclass
+++ /dev/null
@@ -1,99 +0,0 @@
1inherit relocatable
2
3# Cross packages are built indirectly via dependency,
4# no need for them to be a direct target of 'world'
5EXCLUDE_FROM_WORLD = "1"
6
7CLASSOVERRIDE = "class-cross"
8PACKAGES = ""
9PACKAGES_DYNAMIC = ""
10PACKAGES_DYNAMIC_class-native = ""
11
12HOST_ARCH = "${BUILD_ARCH}"
13HOST_VENDOR = "${BUILD_VENDOR}"
14HOST_OS = "${BUILD_OS}"
15HOST_PREFIX = "${BUILD_PREFIX}"
16HOST_CC_ARCH = "${BUILD_CC_ARCH}"
17HOST_LD_ARCH = "${BUILD_LD_ARCH}"
18HOST_AS_ARCH = "${BUILD_AS_ARCH}"
19
20# No strip sysroot when DEBUG_BUILD is enabled
21INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
22
23export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
24
25STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
26
27PACKAGE_ARCH = "${BUILD_ARCH}"
28
29MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
30
31export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
32export PKG_CONFIG_SYSROOT_DIR = ""
33
34TARGET_CPPFLAGS = ""
35TARGET_CFLAGS = ""
36TARGET_CXXFLAGS = ""
37TARGET_LDFLAGS = ""
38
39CPPFLAGS = "${BUILD_CPPFLAGS}"
40CFLAGS = "${BUILD_CFLAGS}"
41CXXFLAGS = "${BUILD_CFLAGS}"
42LDFLAGS = "${BUILD_LDFLAGS}"
43
44TOOLCHAIN_OPTIONS = ""
45
46# This class encodes staging paths into its scripts data so can only be
47# reused if we manipulate the paths.
48SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
49
50# Path mangling needed by the cross packaging
51# Note that we use := here to ensure that libdir and includedir are
52# target paths.
53target_base_prefix := "${root_prefix}"
54target_prefix := "${prefix}"
55target_exec_prefix := "${exec_prefix}"
56target_base_libdir = "${target_base_prefix}/${baselib}"
57target_libdir = "${target_exec_prefix}/${baselib}"
58target_includedir := "${includedir}"
59
60# Overrides for paths
61CROSS_TARGET_SYS_DIR = "${TARGET_SYS}"
62prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
63base_prefix = "${STAGING_DIR_NATIVE}"
64exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
65bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
66sbindir = "${bindir}"
67base_bindir = "${bindir}"
68base_sbindir = "${bindir}"
69libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
70libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
71
72do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
73do_packagedata[stamp-extra-info] = ""
74
75do_install () {
76 oe_runmake 'DESTDIR=${D}' install
77}
78
79USE_NLS = "no"
80
81export CC = "${BUILD_CC}"
82export CXX = "${BUILD_CXX}"
83export FC = "${BUILD_FC}"
84export CPP = "${BUILD_CPP}"
85export LD = "${BUILD_LD}"
86export CCLD = "${BUILD_CCLD}"
87export AR = "${BUILD_AR}"
88export AS = "${BUILD_AS}"
89export RANLIB = "${BUILD_RANLIB}"
90export STRIP = "${BUILD_STRIP}"
91export NM = "${BUILD_NM}"
92
93inherit nopackages
94
95python do_addto_recipe_sysroot () {
96 bb.build.exec_func("extend_recipe_sysroot", d)
97}
98addtask addto_recipe_sysroot after do_populate_sysroot
99do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
deleted file mode 100644
index 04aecb694e..0000000000
--- a/meta/classes/crosssdk.bbclass
+++ /dev/null
@@ -1,51 +0,0 @@
1inherit cross
2
3CLASSOVERRIDE = "class-crosssdk"
4NATIVESDKLIBC ?= "libc-glibc"
5LIBCOVERRIDE = ":${NATIVESDKLIBC}"
6MACHINEOVERRIDES = ""
7PACKAGE_ARCH = "${SDK_ARCH}"
8
9python () {
10 # set TUNE_PKGARCH to SDK_ARCH
11 d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
12 # Set features here to prevent appends and distro features backfill
13 # from modifying nativesdk distro features
14 features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
15 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
16 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
17}
18
19STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
20
21# This class encodes staging paths into its scripts data so can only be
22# reused if we manipulate the paths.
23SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
24
25TARGET_ARCH = "${SDK_ARCH}"
26TARGET_VENDOR = "${SDK_VENDOR}"
27TARGET_OS = "${SDK_OS}"
28TARGET_PREFIX = "${SDK_PREFIX}"
29TARGET_CC_ARCH = "${SDK_CC_ARCH}"
30TARGET_LD_ARCH = "${SDK_LD_ARCH}"
31TARGET_AS_ARCH = "${SDK_AS_ARCH}"
32TARGET_CPPFLAGS = ""
33TARGET_CFLAGS = ""
34TARGET_CXXFLAGS = ""
35TARGET_LDFLAGS = ""
36TARGET_FPU = ""
37
38
39target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
40target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
41target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
42target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
43target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
44baselib = "lib"
45
46do_packagedata[stamp-extra-info] = ""
47
48# Need to force this to ensure consistency across architectures
49EXTRA_OECONF_GCC_FLOAT = ""
50
51USE_NLS = "no"
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 112ee3379d..56ba8bceef 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# This class is used to check recipes against public CVEs. 7# This class is used to check recipes against public CVEs.
2# 8#
3# In order to use this class just inherit the class in the 9# In order to use this class just inherit the class in the
@@ -20,13 +26,13 @@
20# the only method to check against CVEs. Running this tool 26# the only method to check against CVEs. Running this tool
21# doesn't guarantee your packages are free of CVEs. 27# doesn't guarantee your packages are free of CVEs.
22 28
23# The product name that the CVE database uses. Defaults to BPN, but may need to 29# The product name that the CVE database uses defaults to BPN, but may need to
24# be overriden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff). 30# be overriden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
25CVE_PRODUCT ??= "${BPN}" 31CVE_PRODUCT ??= "${BPN}"
26CVE_VERSION ??= "${PV}" 32CVE_VERSION ??= "${PV}"
27 33
28CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK" 34CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
29CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db" 35CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2-1.db"
30CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock" 36CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
31 37
32CVE_CHECK_LOG ?= "${T}/cve.log" 38CVE_CHECK_LOG ?= "${T}/cve.log"
@@ -34,38 +40,115 @@ CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
34CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve" 40CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
35CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary" 41CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
36CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}" 42CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
43CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json"
44CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt"
45
46CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
37 47
38CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve" 48CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
39CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}" 49CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
40CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve" 50CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
51CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.cve"
52CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.json"
41CVE_CHECK_COPY_FILES ??= "1" 53CVE_CHECK_COPY_FILES ??= "1"
42CVE_CHECK_CREATE_MANIFEST ??= "1" 54CVE_CHECK_CREATE_MANIFEST ??= "1"
43 55
56# Report Patched or Ignored CVEs
44CVE_CHECK_REPORT_PATCHED ??= "1" 57CVE_CHECK_REPORT_PATCHED ??= "1"
45 58
46# Whitelist for packages (PN) 59CVE_CHECK_SHOW_WARNINGS ??= "1"
47CVE_CHECK_PN_WHITELIST ?= "" 60
61# Provide text output
62CVE_CHECK_FORMAT_TEXT ??= "1"
63
64# Provide JSON output
65CVE_CHECK_FORMAT_JSON ??= "1"
48 66
49# Whitelist for CVE. If a CVE is found, then it is considered patched. 67# Check for packages without CVEs (no issues or missing product name)
50# The value is a string containing space separated CVE values: 68CVE_CHECK_COVERAGE ??= "1"
69
70# Skip CVE Check for packages (PN)
71CVE_CHECK_SKIP_RECIPE ?= ""
72
73# Replace NVD DB check status for a given CVE. Each CVE has to be mentioned
74# separately with optional detail and description for this status.
75#
76# CVE_STATUS[CVE-1234-0001] = "not-applicable-platform: Issue only applies on Windows"
77# CVE_STATUS[CVE-1234-0002] = "fixed-version: Fixed externally"
78#
79# Setting the same status and reason for multiple CVEs is possible
80# via CVE_STATUS_GROUPS variable.
81#
82# CVE_STATUS_GROUPS = "CVE_STATUS_WIN CVE_STATUS_PATCHED"
83#
84# CVE_STATUS_WIN = "CVE-1234-0001 CVE-1234-0003"
85# CVE_STATUS_WIN[status] = "not-applicable-platform: Issue only applies on Windows"
86# CVE_STATUS_PATCHED = "CVE-1234-0002 CVE-1234-0004"
87# CVE_STATUS_PATCHED[status] = "fixed-version: Fixed externally"
51# 88#
52# CVE_CHECK_WHITELIST = 'CVE-2014-2524 CVE-2018-1234' 89# All possible CVE statuses could be found in cve-check-map.conf
90# CVE_CHECK_STATUSMAP[not-applicable-platform] = "Ignored"
91# CVE_CHECK_STATUSMAP[fixed-version] = "Patched"
53# 92#
54CVE_CHECK_WHITELIST ?= "" 93# CVE_CHECK_IGNORE is deprecated and CVE_STATUS has to be used instead.
94# Keep CVE_CHECK_IGNORE until other layers migrate to new variables
95CVE_CHECK_IGNORE ?= ""
55 96
56# Layers to be excluded 97# Layers to be excluded
57CVE_CHECK_LAYER_EXCLUDELIST ??= "" 98CVE_CHECK_LAYER_EXCLUDELIST ??= ""
58 99
59# Layers to be included 100# Layers to be included
60CVE_CHECK_LAYER_INCLUDELIST ??= "" 101CVE_CHECK_LAYER_INCLUDELIST ??= ""
61 102
62 103
63# set to "alphabetical" for version using single alphabetical character as increament release 104# set to "alphabetical" for version using single alphabetical character as increment release
64CVE_VERSION_SUFFIX ??= "" 105CVE_VERSION_SUFFIX ??= ""
65 106
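A short, illustrative use of this suffix handling, assuming an upstream that appends a letter to its bugfix releases (the version strings are examples only):

# recipe whose releases look like 1.1.1k, 1.1.1l, ...
CVE_VERSION_SUFFIX = "alphabetical"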
107python () {
108 # Fallback all CVEs from CVE_CHECK_IGNORE to CVE_STATUS
109 cve_check_ignore = d.getVar("CVE_CHECK_IGNORE")
110 if cve_check_ignore:
111 bb.warn("CVE_CHECK_IGNORE is deprecated in favor of CVE_STATUS")
112 for cve in (d.getVar("CVE_CHECK_IGNORE") or "").split():
113 d.setVarFlag("CVE_STATUS", cve, "ignored")
114
115 # Process CVE_STATUS_GROUPS to set multiple statuses and optional detail or description at once
116 for cve_status_group in (d.getVar("CVE_STATUS_GROUPS") or "").split():
117 cve_group = d.getVar(cve_status_group)
118 if cve_group is not None:
119 for cve in cve_group.split():
120 d.setVarFlag("CVE_STATUS", cve, d.getVarFlag(cve_status_group, "status"))
121 else:
122 bb.warn("CVE_STATUS_GROUPS contains undefined variable %s" % cve_status_group)
123}
124
125def generate_json_report(d, out_path, link_path):
126 if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
127 import json
128 from oe.cve_check import cve_check_merge_jsons, update_symlinks
129
130 bb.note("Generating JSON CVE summary")
131 index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
132 summary = {"version":"1", "package": []}
133 with open(index_file) as f:
134 filename = f.readline()
135 while filename:
136 with open(filename.rstrip()) as j:
137 data = json.load(j)
138 cve_check_merge_jsons(summary, data)
139 filename = f.readline()
140
141 summary["package"].sort(key=lambda d: d['name'])
142
143 with open(out_path, "w") as f:
144 json.dump(summary, f, indent=2)
145
146 update_symlinks(out_path, link_path)
147
66python cve_save_summary_handler () { 148python cve_save_summary_handler () {
67 import shutil 149 import shutil
68 import datetime 150 import datetime
151 from oe.cve_check import update_symlinks
69 152
70 cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE") 153 cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
71 154
@@ -78,13 +161,15 @@ python cve_save_summary_handler () {
78 161
79 if os.path.exists(cve_tmp_file): 162 if os.path.exists(cve_tmp_file):
80 shutil.copyfile(cve_tmp_file, cve_summary_file) 163 shutil.copyfile(cve_tmp_file, cve_summary_file)
81 164 cvefile_link = os.path.join(cvelogpath, cve_summary_name)
82 if cve_summary_file and os.path.exists(cve_summary_file): 165 update_symlinks(cve_summary_file, cvefile_link)
83 cvefile_link = os.path.join(cvelogpath, cve_summary_name) 166 bb.plain("Complete CVE report summary created at: %s" % cvefile_link)
84 167
85 if os.path.exists(os.path.realpath(cvefile_link)): 168 if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
86 os.remove(cvefile_link) 169 json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
87 os.symlink(os.path.basename(cve_summary_file), cvefile_link) 170 json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
171 generate_json_report(d, json_summary_name, json_summary_link_name)
172 bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
88} 173}
89 174
90addhandler cve_save_summary_handler 175addhandler cve_save_summary_handler
@@ -94,23 +179,25 @@ python do_cve_check () {
94 """ 179 """
95 Check recipe for patched and unpatched CVEs 180 Check recipe for patched and unpatched CVEs
96 """ 181 """
182 from oe.cve_check import get_patched_cves
97 183
98 if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")): 184 with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
99 try: 185 if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
100 patched_cves = get_patches_cves(d) 186 try:
101 except FileNotFoundError: 187 patched_cves = get_patched_cves(d)
102 bb.fatal("Failure in searching patches") 188 except FileNotFoundError:
103 whitelisted, patched, unpatched = check_cves(d, patched_cves) 189 bb.fatal("Failure in searching patches")
104 if patched or unpatched: 190 ignored, patched, unpatched, status = check_cves(d, patched_cves)
105 cve_data = get_cve_info(d, patched + unpatched) 191 if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
106 cve_write_data(d, patched, unpatched, whitelisted, cve_data) 192 cve_data = get_cve_info(d, patched + unpatched + ignored)
107 else: 193 cve_write_data(d, patched, unpatched, ignored, cve_data, status)
108 bb.note("No CVE database found, skipping CVE check") 194 else:
195 bb.note("No CVE database found, skipping CVE check")
109 196
110} 197}
111 198
112addtask cve_check before do_build after do_fetch 199addtask cve_check before do_build
113do_cve_check[depends] = "cve-update-db-native:do_fetch" 200do_cve_check[depends] = "cve-update-nvd2-native:do_fetch"
114do_cve_check[nostamp] = "1" 201do_cve_check[nostamp] = "1"
115 202
116python cve_check_cleanup () { 203python cve_check_cleanup () {
@@ -118,10 +205,11 @@ python cve_check_cleanup () {
118 Delete the file used to gather all the CVE information. 205 Delete the file used to gather all the CVE information.
119 """ 206 """
120 bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE")) 207 bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
208 bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH"))
121} 209}
122 210
123addhandler cve_check_cleanup 211addhandler cve_check_cleanup
124cve_check_cleanup[eventmask] = "bb.cooker.CookerExit" 212cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
125 213
126python cve_check_write_rootfs_manifest () { 214python cve_check_write_rootfs_manifest () {
127 """ 215 """
@@ -129,116 +217,113 @@ python cve_check_write_rootfs_manifest () {
129 """ 217 """
130 218
131 import shutil 219 import shutil
220 import json
221 from oe.rootfs import image_list_installed_packages
222 from oe.cve_check import cve_check_merge_jsons, update_symlinks
132 223
133 if d.getVar("CVE_CHECK_COPY_FILES") == "1": 224 if d.getVar("CVE_CHECK_COPY_FILES") == "1":
134 deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE") 225 deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
135 if os.path.exists(deploy_file): 226 if os.path.exists(deploy_file):
136 bb.utils.remove(deploy_file) 227 bb.utils.remove(deploy_file)
137 228 deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
138 if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")): 229 if os.path.exists(deploy_file_json):
139 bb.note("Writing rootfs CVE manifest") 230 bb.utils.remove(deploy_file_json)
140 deploy_dir = d.getVar("DEPLOY_DIR_IMAGE") 231
141 link_name = d.getVar("IMAGE_LINK_NAME") 232 # Create a list of relevant recipies
233 recipies = set()
234 for pkg in list(image_list_installed_packages(d)):
235 pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
236 'runtime-reverse', pkg)
237 pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
238 recipies.add(pkg_data["PN"])
239
240 bb.note("Writing rootfs CVE manifest")
241 deploy_dir = d.getVar("IMGDEPLOYDIR")
242 link_name = d.getVar("IMAGE_LINK_NAME")
243
244 json_data = {"version":"1", "package": []}
245 text_data = ""
246 enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1"
247 enable_text = d.getVar("CVE_CHECK_FORMAT_TEXT") == "1"
248
249 save_pn = d.getVar("PN")
250
251 for pkg in recipies:
252 # To be able to use the CVE_CHECK_RECIPE_FILE variable we have to evaluate
253 # it with the different PN names set each time.
254 d.setVar("PN", pkg)
255 if enable_text:
256 pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE")
257 if os.path.exists(pkgfilepath):
258 with open(pkgfilepath) as pfile:
259 text_data += pfile.read()
260
261 if enable_json:
262 pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
263 if os.path.exists(pkgfilepath):
264 with open(pkgfilepath) as j:
265 data = json.load(j)
266 cve_check_merge_jsons(json_data, data)
267
268 d.setVar("PN", save_pn)
269
270 if enable_text:
271 link_path = os.path.join(deploy_dir, "%s.cve" % link_name)
142 manifest_name = d.getVar("CVE_CHECK_MANIFEST") 272 manifest_name = d.getVar("CVE_CHECK_MANIFEST")
143 cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
144 273
145 shutil.copyfile(cve_tmp_file, manifest_name) 274 with open(manifest_name, "w") as f:
275 f.write(text_data)
146 276
147 if manifest_name and os.path.exists(manifest_name): 277 update_symlinks(manifest_name, link_path)
148 manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name) 278 bb.plain("Image CVE report stored in: %s" % manifest_name)
149 # If we already have another manifest, update symlinks
150 if os.path.exists(os.path.realpath(manifest_link)):
151 os.remove(manifest_link)
152 os.symlink(os.path.basename(manifest_name), manifest_link)
153 bb.plain("Image CVE report stored in: %s" % manifest_name)
154}
155 279
156ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}" 280 if enable_json:
157do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}" 281 link_path = os.path.join(deploy_dir, "%s.json" % link_name)
282 manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
158 283
159def get_patches_cves(d): 284 with open(manifest_name, "w") as f:
160 """ 285 json.dump(json_data, f, indent=2)
161 Get patches that solve CVEs using the "CVE: " tag.
162 """
163 286
164 import re 287 update_symlinks(manifest_name, link_path)
165 288 bb.plain("Image CVE JSON report stored in: %s" % manifest_name)
166 pn = d.getVar("PN") 289}
167 cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
168
169 # Matches last CVE-1234-211432 in the file name, also if written
170 # with small letters. Not supporting multiple CVE id's in a single
171 # file name.
172 cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
173
174 patched_cves = set()
175 bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
176 for url in src_patches(d):
177 patch_file = bb.fetch.decodeurl(url)[2]
178
179 if not os.path.isfile(patch_file):
180 bb.error("File Not found: %s" % patch_file)
181 raise FileNotFoundError
182
183 # Check patch file name for CVE ID
184 fname_match = cve_file_name_match.search(patch_file)
185 if fname_match:
186 cve = fname_match.group(1).upper()
187 patched_cves.add(cve)
188 bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
189
190 with open(patch_file, "r", encoding="utf-8") as f:
191 try:
192 patch_text = f.read()
193 except UnicodeDecodeError:
194 bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
195 " trying with iso8859-1" % patch_file)
196 f.close()
197 with open(patch_file, "r", encoding="iso8859-1") as f:
198 patch_text = f.read()
199
200 # Search for one or more "CVE: " lines
201 text_match = False
202 for match in cve_match.finditer(patch_text):
203 # Get only the CVEs without the "CVE: " tag
204 cves = patch_text[match.start()+5:match.end()]
205 for cve in cves.split():
206 bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
207 patched_cves.add(cve)
208 text_match = True
209
210 if not fname_match and not text_match:
211 bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
212 290
213 return patched_cves 291ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
292do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
293do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
214 294
215def check_cves(d, patched_cves): 295def check_cves(d, patched_cves):
216 """ 296 """
217 Connect to the NVD database and find unpatched cves. 297 Connect to the NVD database and find unpatched cves.
218 """ 298 """
219 from oe.cve_check import Version 299 from oe.cve_check import Version, convert_cve_version, decode_cve_status
220 300
221 pn = d.getVar("PN") 301 pn = d.getVar("PN")
222 real_pv = d.getVar("PV") 302 real_pv = d.getVar("PV")
223 suffix = d.getVar("CVE_VERSION_SUFFIX") 303 suffix = d.getVar("CVE_VERSION_SUFFIX")
224 304
225 cves_unpatched = [] 305 cves_unpatched = []
306 cves_ignored = []
307 cves_status = []
308 cves_in_recipe = False
226 # CVE_PRODUCT can contain more than one product (eg. curl/libcurl) 309 # CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
227 products = d.getVar("CVE_PRODUCT").split() 310 products = d.getVar("CVE_PRODUCT").split()
228 # If this has been unset then we're not scanning for CVEs here (for example, image recipes) 311 # If this has been unset then we're not scanning for CVEs here (for example, image recipes)
229 if not products: 312 if not products:
230 return ([], [], []) 313 return ([], [], [], [])
231 pv = d.getVar("CVE_VERSION").split("+git")[0] 314 pv = d.getVar("CVE_VERSION").split("+git")[0]
232 315
233 # If the recipe has been whitlisted we return empty lists 316 # If the recipe has been skipped/ignored we return empty lists
234 if pn in d.getVar("CVE_CHECK_PN_WHITELIST").split(): 317 if pn in d.getVar("CVE_CHECK_SKIP_RECIPE").split():
235 bb.note("Recipe has been whitelisted, skipping check") 318 bb.note("Recipe has been skipped by cve-check")
236 return ([], [], []) 319 return ([], [], [], [])
237 320
238 old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST") 321 # Convert CVE_STATUS into ignored CVEs and check validity
239 if old_cve_whitelist: 322 cve_ignore = []
240 bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.") 323 for cve in (d.getVarFlags("CVE_STATUS") or {}):
241 cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split() 324 decoded_status, _, _ = decode_cve_status(d, cve)
325 if decoded_status == "Ignored":
326 cve_ignore.append(cve)
242 327
243 import sqlite3 328 import sqlite3
244 db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro") 329 db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
@@ -246,28 +331,42 @@ def check_cves(d, patched_cves):
246 331
247 # For each of the known product names (e.g. curl has CPEs using curl and libcurl)... 332 # For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
248 for product in products: 333 for product in products:
334 cves_in_product = False
249 if ":" in product: 335 if ":" in product:
250 vendor, product = product.split(":", 1) 336 vendor, product = product.split(":", 1)
251 else: 337 else:
252 vendor = "%" 338 vendor = "%"
253 339
254 # Find all relevant CVE IDs. 340 # Find all relevant CVE IDs.
255 for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)): 341 cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
342 for cverow in cve_cursor:
256 cve = cverow[0] 343 cve = cverow[0]
257 344
258 if cve in cve_whitelist: 345 if cve in cve_ignore:
259 bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve)) 346 bb.note("%s-%s ignores %s" % (product, pv, cve))
260 # TODO: this should be in the report as 'whitelisted' 347 cves_ignored.append(cve)
261 patched_cves.add(cve)
262 continue 348 continue
263 elif cve in patched_cves: 349 elif cve in patched_cves:
264 bb.note("%s has been patched" % (cve)) 350 bb.note("%s has been patched" % (cve))
265 continue 351 continue
352 # Write status once only for each product
353 if not cves_in_product:
354 cves_status.append([product, True])
355 cves_in_product = True
356 cves_in_recipe = True
266 357
267 vulnerable = False 358 vulnerable = False
268 for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)): 359 ignored = False
360
361 product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
362 for row in product_cursor:
269 (_, _, _, version_start, operator_start, version_end, operator_end) = row 363 (_, _, _, version_start, operator_start, version_end, operator_end) = row
270 #bb.debug(2, "Evaluating row " + str(row)) 364 #bb.debug(2, "Evaluating row " + str(row))
365 if cve in cve_ignore:
366 ignored = True
367
368 version_start = convert_cve_version(version_start)
369 version_end = convert_cve_version(version_end)
271 370
272 if (operator_start == '=' and pv == version_start) or version_start == '-': 371 if (operator_start == '=' and pv == version_start) or version_start == '-':
273 vulnerable = True 372 vulnerable = True
@@ -300,18 +399,33 @@ def check_cves(d, patched_cves):
300 vulnerable = vulnerable_start or vulnerable_end 399 vulnerable = vulnerable_start or vulnerable_end
301 400
302 if vulnerable: 401 if vulnerable:
303 bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve)) 402 if ignored:
304 cves_unpatched.append(cve) 403 bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv))
404 cves_ignored.append(cve)
405 else:
406 bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
407 cves_unpatched.append(cve)
305 break 408 break
409 product_cursor.close()
306 410
307 if not vulnerable: 411 if not vulnerable:
308 bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve)) 412 bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
309 # TODO: not patched but not vulnerable
310 patched_cves.add(cve) 413 patched_cves.add(cve)
414 cve_cursor.close()
415
416 if not cves_in_product:
417 bb.note("No CVE records found for product %s, pn %s" % (product, pn))
418 cves_status.append([product, False])
311 419
312 conn.close() 420 conn.close()
421 diff_ignore = list(set(cve_ignore) - set(cves_ignored))
422 if diff_ignore:
423 oe.qa.handle_error("cve_status_not_in_db", "Found CVE (%s) with CVE_STATUS set that are not found in database for this component" % " ".join(diff_ignore), d)
313 424
314 return (list(cve_whitelist), list(patched_cves), cves_unpatched) 425 if not cves_in_recipe:
426 bb.note("No CVE records for products in recipe %s" % (pn))
427
428 return (list(cves_ignored), list(patched_cves), cves_unpatched, cves_status)
315 429
316def get_cve_info(d, cves): 430def get_cve_info(d, cves):
317 """ 431 """
@@ -321,26 +435,30 @@ def get_cve_info(d, cves):
321 import sqlite3 435 import sqlite3
322 436
323 cve_data = {} 437 cve_data = {}
324 conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE")) 438 db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
439 conn = sqlite3.connect(db_file, uri=True)
325 440
326 for cve in cves: 441 for cve in cves:
327 for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)): 442 cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
443 for row in cursor:
328 cve_data[row[0]] = {} 444 cve_data[row[0]] = {}
329 cve_data[row[0]]["summary"] = row[1] 445 cve_data[row[0]]["summary"] = row[1]
330 cve_data[row[0]]["scorev2"] = row[2] 446 cve_data[row[0]]["scorev2"] = row[2]
331 cve_data[row[0]]["scorev3"] = row[3] 447 cve_data[row[0]]["scorev3"] = row[3]
332 cve_data[row[0]]["modified"] = row[4] 448 cve_data[row[0]]["modified"] = row[4]
333 cve_data[row[0]]["vector"] = row[5] 449 cve_data[row[0]]["vector"] = row[5]
334 450 cve_data[row[0]]["vectorString"] = row[6]
451 cursor.close()
335 conn.close() 452 conn.close()
336 return cve_data 453 return cve_data
337 454
338def cve_write_data(d, patched, unpatched, whitelisted, cve_data): 455def cve_write_data_text(d, patched, unpatched, ignored, cve_data):
339 """ 456 """
340 Write CVE information in WORKDIR; and to CVE_CHECK_DIR, and 457 Write CVE information in WORKDIR; and to CVE_CHECK_DIR, and
341 CVE manifest if enabled. 458 CVE manifest if enabled.
342 """ 459 """
343 460
461 from oe.cve_check import decode_cve_status
344 462
345 cve_file = d.getVar("CVE_CHECK_LOG") 463 cve_file = d.getVar("CVE_CHECK_LOG")
346 fdir_name = d.getVar("FILE_DIRNAME") 464 fdir_name = d.getVar("FILE_DIRNAME")
@@ -349,55 +467,195 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
349 include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split() 467 include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
350 exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split() 468 exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
351 469
470 report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
471
352 if exclude_layers and layer in exclude_layers: 472 if exclude_layers and layer in exclude_layers:
353 return 473 return
354 474
355 if include_layers and layer not in include_layers: 475 if include_layers and layer not in include_layers:
356 return 476 return
357 477
358 nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId=" 478 # Early exit, the text format does not report packages without CVEs
479 if not patched+unpatched+ignored:
480 return
481
482 nvd_link = "https://nvd.nist.gov/vuln/detail/"
359 write_string = "" 483 write_string = ""
360 unpatched_cves = [] 484 unpatched_cves = []
361 bb.utils.mkdirhier(os.path.dirname(cve_file)) 485 bb.utils.mkdirhier(os.path.dirname(cve_file))
362 486
363 for cve in sorted(cve_data): 487 for cve in sorted(cve_data):
364 is_patched = cve in patched 488 is_patched = cve in patched
365 if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"): 489 is_ignored = cve in ignored
490
491 status = "Unpatched"
492 if (is_patched or is_ignored) and not report_all:
366 continue 493 continue
494 if is_ignored:
495 status = "Ignored"
496 elif is_patched:
497 status = "Patched"
498 else:
499 # default value of status is Unpatched
500 unpatched_cves.append(cve)
501
367 write_string += "LAYER: %s\n" % layer 502 write_string += "LAYER: %s\n" % layer
368 write_string += "PACKAGE NAME: %s\n" % d.getVar("PN") 503 write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
369 write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV")) 504 write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
370 write_string += "CVE: %s\n" % cve 505 write_string += "CVE: %s\n" % cve
371 if cve in whitelisted: 506 write_string += "CVE STATUS: %s\n" % status
372 write_string += "CVE STATUS: Whitelisted\n" 507 _, detail, description = decode_cve_status(d, cve)
373 elif is_patched: 508 if detail:
374 write_string += "CVE STATUS: Patched\n" 509 write_string += "CVE DETAIL: %s\n" % detail
375 else: 510 if description:
376 unpatched_cves.append(cve) 511 write_string += "CVE DESCRIPTION: %s\n" % description
377 write_string += "CVE STATUS: Unpatched\n"
378 write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"] 512 write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"]
379 write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["scorev2"] 513 write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["scorev2"]
380 write_string += "CVSS v3 BASE SCORE: %s\n" % cve_data[cve]["scorev3"] 514 write_string += "CVSS v3 BASE SCORE: %s\n" % cve_data[cve]["scorev3"]
381 write_string += "VECTOR: %s\n" % cve_data[cve]["vector"] 515 write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
516 write_string += "VECTORSTRING: %s\n" % cve_data[cve]["vectorString"]
382 write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve) 517 write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
383 518
384 if unpatched_cves: 519 if unpatched_cves and d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1":
385 bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file)) 520 bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
386 521
387 if write_string: 522 with open(cve_file, "w") as f:
388 with open(cve_file, "w") as f: 523 bb.note("Writing file %s with CVE information" % cve_file)
389 bb.note("Writing file %s with CVE information" % cve_file) 524 f.write(write_string)
525
526 if d.getVar("CVE_CHECK_COPY_FILES") == "1":
527 deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
528 bb.utils.mkdirhier(os.path.dirname(deploy_file))
529 with open(deploy_file, "w") as f:
390 f.write(write_string) 530 f.write(write_string)
391 531
392 if d.getVar("CVE_CHECK_COPY_FILES") == "1": 532 if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
393 deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE") 533 cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
394 bb.utils.mkdirhier(os.path.dirname(deploy_file)) 534 bb.utils.mkdirhier(cvelogpath)
395 with open(deploy_file, "w") as f: 535
396 f.write(write_string) 536 with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
537 f.write("%s" % write_string)
538
539def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file):
540 """
541 Write CVE information in the JSON format: to WORKDIR; and to
542 CVE_CHECK_DIR, if CVE manifest if enabled, write fragment
543 files that will be assembled at the end in cve_check_write_rootfs_manifest.
544 """
545
546 import json
547
548 write_string = json.dumps(output, indent=2)
549 with open(direct_file, "w") as f:
550 bb.note("Writing file %s with CVE information" % direct_file)
551 f.write(write_string)
552
553 if d.getVar("CVE_CHECK_COPY_FILES") == "1":
554 bb.utils.mkdirhier(os.path.dirname(deploy_file))
555 with open(deploy_file, "w") as f:
556 f.write(write_string)
557
558 if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
559 cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
560 index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
561 bb.utils.mkdirhier(cvelogpath)
562 fragment_file = os.path.basename(deploy_file)
563 fragment_path = os.path.join(cvelogpath, fragment_file)
564 with open(fragment_path, "w") as f:
565 f.write(write_string)
566 with open(index_path, "a+") as f:
567 f.write("%s\n" % fragment_path)
568
569def cve_write_data_json(d, patched, unpatched, ignored, cve_data, cve_status):
570 """
571 Prepare CVE data for the JSON format, then write it.
572 """
573
574 from oe.cve_check import decode_cve_status
575
576 output = {"version":"1", "package": []}
577 nvd_link = "https://nvd.nist.gov/vuln/detail/"
397 578
398 if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1": 579 fdir_name = d.getVar("FILE_DIRNAME")
399 cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR") 580 layer = fdir_name.split("/")[-3]
400 bb.utils.mkdirhier(cvelogpath) 581
582 include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
583 exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
584
585 report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
586
587 if exclude_layers and layer in exclude_layers:
588 return
589
590 if include_layers and layer not in include_layers:
591 return
592
593 unpatched_cves = []
594
595 product_data = []
596 for s in cve_status:
597 p = {"product": s[0], "cvesInRecord": "Yes"}
598 if s[1] == False:
599 p["cvesInRecord"] = "No"
600 product_data.append(p)
601
602 package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
603 package_data = {
604 "name" : d.getVar("PN"),
605 "layer" : layer,
606 "version" : package_version,
607 "products": product_data
608 }
609 cve_list = []
610
611 for cve in sorted(cve_data):
612 is_patched = cve in patched
613 is_ignored = cve in ignored
614 status = "Unpatched"
615 if (is_patched or is_ignored) and not report_all:
616 continue
617 if is_ignored:
618 status = "Ignored"
619 elif is_patched:
620 status = "Patched"
621 else:
622 # default value of status is Unpatched
623 unpatched_cves.append(cve)
624
625 issue_link = "%s%s" % (nvd_link, cve)
626
627 cve_item = {
628 "id" : cve,
629 "summary" : cve_data[cve]["summary"],
630 "scorev2" : cve_data[cve]["scorev2"],
631 "scorev3" : cve_data[cve]["scorev3"],
632 "vector" : cve_data[cve]["vector"],
633 "vectorString" : cve_data[cve]["vectorString"],
634 "status" : status,
635 "link": issue_link
636 }
637 _, detail, description = decode_cve_status(d, cve)
638 if detail:
639 cve_item["detail"] = detail
640 if description:
641 cve_item["description"] = description
642 cve_list.append(cve_item)
643
644 package_data["issue"] = cve_list
645 output["package"].append(package_data)
646
647 direct_file = d.getVar("CVE_CHECK_LOG_JSON")
648 deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
649 manifest_file = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON")
650
651 cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file)
652
653def cve_write_data(d, patched, unpatched, ignored, cve_data, status):
654 """
655 Write CVE data in each enabled format.
656 """
401 657
402 with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f: 658 if d.getVar("CVE_CHECK_FORMAT_TEXT") == "1":
403 f.write("%s" % write_string) 659 cve_write_data_text(d, patched, unpatched, ignored, cve_data)
660 if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
661 cve_write_data_json(d, patched, unpatched, ignored, cve_data, status)
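For illustration, the JSON writer above assembles a report with roughly the following shape; the values below are hypothetical and only mirror the fields built in cve_write_data_json:

    import json

    # Hypothetical example of the structure handed to cve_check_write_json_output();
    # package, version and CVE values are illustrative only.
    sample_output = {
        "version": "1",
        "package": [{
            "name": "example-recipe",            # d.getVar("PN")
            "layer": "meta",                     # third-from-last FILE_DIRNAME component
            "version": "1.2.3",                  # EXTENDPE + PV
            "products": [{"product": "example", "cvesInRecord": "Yes"}],
            "issue": [{
                "id": "CVE-2023-0001",
                "summary": "Example summary text",
                "scorev2": "0.0",
                "scorev3": "7.5",
                "vector": "NETWORK",
                "vectorString": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H",
                "status": "Unpatched",
                "link": "https://nvd.nist.gov/vuln/detail/CVE-2023-0001"
            }]
        }]
    }

    print(json.dumps(sample_output, indent=2))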
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
deleted file mode 100644
index 6f8a599ccb..0000000000
--- a/meta/classes/debian.bbclass
+++ /dev/null
@@ -1,146 +0,0 @@
1# Debian package renaming only occurs when a package is built
2# We therefore have to make sure we build all runtime packages
3# before building the current package so that the package's runtime
4# depends are correct
5#
6# Custom library package names can be defined setting
7# DEBIANNAME_ + pkgname to the desired name.
8#
9# Better expressed as: ensure all RDEPENDS packages are built before we package
10# This means we can't have circular RDEPENDS/RRECOMMENDS
11
12AUTO_LIBNAME_PKGS = "${PACKAGES}"
13
14inherit package
15
16DEBIANRDEP = "do_packagedata"
17do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
18do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
19do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
20do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
21
22python () {
23 if not d.getVar("PACKAGES"):
24 d.setVar("DEBIANRDEP", "")
25}
26
27python debian_package_name_hook () {
28 import glob, copy, stat, errno, re, pathlib, subprocess
29
30 pkgdest = d.getVar("PKGDEST")
31 packages = d.getVar('PACKAGES')
32 so_re = re.compile(r"lib.*\.so")
33
34 def socrunch(s):
35 s = s.lower().replace('_', '-')
36 m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
37 if m is None:
38 return None
39 if m.group(2) in '0123456789':
40 bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
41 else:
42 bin = m.group(1) + m.group(2) + m.group(3)
43 dev = m.group(1) + m.group(2)
44 return (bin, dev)
45
46 def isexec(path):
47 try:
48 s = os.stat(path)
49 except (os.error, AttributeError):
50 return 0
51 return (s[stat.ST_MODE] & stat.S_IEXEC)
52
53 def add_rprovides(pkg, d):
54 newpkg = d.getVar('PKG_' + pkg)
55 if newpkg and newpkg != pkg:
56 provs = (d.getVar('RPROVIDES_' + pkg) or "").split()
57 if pkg not in provs:
58 d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
59
60 def auto_libname(packages, orig_pkg):
61 p = lambda var: pathlib.PurePath(d.getVar(var))
62 libdirs = (p("base_libdir"), p("libdir"))
63 bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))
64
65 sonames = []
66 has_bins = 0
67 has_libs = 0
68 for f in pkgfiles[orig_pkg]:
69 # This is .../packages-split/orig_pkg/
70 pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
71 # Strip pkgpath off the full path to a file in the package, re-root
72 # so it is absolute, and then get the parent directory of the file.
73 path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
74 if path in bindirs:
75 has_bins = 1
76 if path in libdirs:
77 has_libs = 1
78 if so_re.match(os.path.basename(f)):
79 try:
80 cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
81 output = subprocess.check_output(cmd).decode("utf-8")
82 for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
83 if m.group(1) not in sonames:
84 sonames.append(m.group(1))
85 except subprocess.CalledProcessError:
86 pass
87 bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
88 soname = None
89 if len(sonames) == 1:
90 soname = sonames[0]
91 elif len(sonames) > 1:
92 lead = d.getVar('LEAD_SONAME')
93 if lead:
94 r = re.compile(lead)
95 filtered = []
96 for s in sonames:
97 if r.match(s):
98 filtered.append(s)
99 if len(filtered) == 1:
100 soname = filtered[0]
101 elif len(filtered) > 1:
102 bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
103 else:
104 bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
105 else:
106 bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
107
108 if has_libs and not has_bins and soname:
109 soname_result = socrunch(soname)
110 if soname_result:
111 (pkgname, devname) = soname_result
112 for pkg in packages.split():
113 if (d.getVar('PKG_' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, False)):
114 add_rprovides(pkg, d)
115 continue
116 debian_pn = d.getVar('DEBIANNAME_' + pkg, False)
117 if debian_pn:
118 newpkg = debian_pn
119 elif pkg == orig_pkg:
120 newpkg = pkgname
121 else:
122 newpkg = pkg.replace(orig_pkg, devname, 1)
123 mlpre=d.getVar('MLPREFIX')
124 if mlpre:
125 if not newpkg.find(mlpre) == 0:
126 newpkg = mlpre + newpkg
127 if newpkg != pkg:
128 bb.note("debian: renaming %s to %s" % (pkg, newpkg))
129 d.setVar('PKG_' + pkg, newpkg)
130 add_rprovides(pkg, d)
131 else:
132 add_rprovides(orig_pkg, d)
133
134 # reversed sort is needed when some package is substring of another
135 # ie in ncurses we get without reverse sort:
136 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
137 # and later
138 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
139 # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
140 for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
141 auto_libname(packages, pkg)
142}
143
144EXPORT_FUNCTIONS package_name_hook
145
146DEBIAN_NAMES = "1"
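A minimal standalone sketch of the soname-to-package-name mapping performed by socrunch() above, with hypothetical sonames:

    import re

    def socrunch(s):
        # Same regex as the hook above: lowercase, '_' -> '-', split around ".so."
        s = s.lower().replace('_', '-')
        m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
        if m is None:
            return None
        if m.group(2) in '0123456789':
            bin_pkg = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
        else:
            bin_pkg = m.group(1) + m.group(2) + m.group(3)
        return (bin_pkg, m.group(1) + m.group(2))

    # Hypothetical sonames and the (runtime, dev) package names they map to:
    print(socrunch("libz.so.1"))      # ('libz1', 'libz')
    print(socrunch("libgcc_s.so.1"))  # ('libgcc-s1', 'libgcc-s')
    print(socrunch("libfoo2.so.3"))   # ('libfoo2-3', 'libfoo2')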
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
deleted file mode 100644
index 737c26122b..0000000000
--- a/meta/classes/deploy.bbclass
+++ /dev/null
@@ -1,12 +0,0 @@
1DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
2SSTATETASKS += "do_deploy"
3do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
4do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
5
6python do_deploy_setscene () {
7 sstate_setscene(d)
8}
9addtask do_deploy_setscene
10do_deploy[dirs] = "${DEPLOYDIR} ${B}"
11do_deploy[cleandirs] = "${DEPLOYDIR}"
12do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
diff --git a/meta/classes/devicetree.bbclass b/meta/classes/devicetree.bbclass
deleted file mode 100644
index ece883accf..0000000000
--- a/meta/classes/devicetree.bbclass
+++ /dev/null
@@ -1,148 +0,0 @@
1# This bbclass implements device tree compilation for user-provided device tree
2# sources. The compilation of the device tree sources is the same as the kernel
3# device tree compilation process, this includes being able to include sources
4# from the kernel such as soc dtsi files or header files such as gpio.h. In
5# addition to device trees this bbclass also handles compilation of device tree
6# overlays.
7#
8# The output of this class behaves similar to how kernel-devicetree.bbclass
9# operates in that the output files are installed into /boot/devicetree.
10# However this class on purpose separates the deployed device trees into the
11# 'devicetree' subdirectory. This prevents clashes with the kernel-devicetree
12# output. Additionally the device trees are populated into the sysroot for
13# access via the sysroot from within other recipes.
14
15SECTION ?= "bsp"
16
17# The default inclusion of kernel device tree includes and headers means that
18# device trees built with them are at least GPLv2 (and in some cases dual
19# licensed). Default to GPLv2 if the recipe does not specify a license.
20LICENSE ?= "GPLv2"
21LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
22
23INHIBIT_DEFAULT_DEPS = "1"
24DEPENDS += "dtc-native"
25
26inherit deploy kernel-arch
27
28COMPATIBLE_MACHINE ?= "^$"
29
30PROVIDES = "virtual/dtb"
31
32PACKAGE_ARCH = "${MACHINE_ARCH}"
33
34SYSROOT_DIRS += "/boot/devicetree"
35FILES_${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
36
37S = "${WORKDIR}"
38B = "${WORKDIR}/build"
39
40# Default kernel includes, these represent what are normally used for in-kernel
41# sources.
42KERNEL_INCLUDE ??= " \
43 ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \
44 ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \
45 ${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \
46 "
47
48DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
49DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
50DT_FILES_PATH[doc] = "Defaults to source directory, can be used to select dts files that are not in source (e.g. generated)."
51DT_FILES_PATH ?= "${S}"
52
53DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, used as extra space typically for additional properties during boot."
54DT_PADDING_SIZE ??= "0x3000"
55DT_RESERVED_MAP[doc] = "Number of reserved map entries."
56DT_RESERVED_MAP ??= "8"
57DT_BOOT_CPU[doc] = "The boot cpu, defaults to 0"
58DT_BOOT_CPU ??= "0"
59
60DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
61DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
62DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@"
63DTC_OFLAGS ?= "-p 0 -@ -H epapr"
64
65python () {
66 if d.getVar("KERNEL_INCLUDE"):
67 # auto add dependency on kernel tree, but only if kernel include paths
68 # are specified.
69 d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure")
70}
71
72def expand_includes(varname, d):
73 import glob
74 includes = set()
75 # expand all includes with glob
76 for i in (d.getVar(varname) or "").split():
77 for g in glob.glob(i):
78 if os.path.isdir(g): # only add directories to include path
79 includes.add(g)
80 return includes
81
82def devicetree_source_is_overlay(path):
83 # determine if a dts file is an overlay by checking if it uses "/plugin/;"
84 with open(path, "r") as f:
85 for i in f:
86 if i.startswith("/plugin/;"):
87 return True
88 return False
89
90def devicetree_compile(dtspath, includes, d):
91 import subprocess
92 dts = os.path.basename(dtspath)
93 dtname = os.path.splitext(dts)[0]
94 bb.note("Processing {0} [{1}]".format(dtname, dts))
95
96 # preprocess
97 ppargs = d.getVar("BUILD_CPP").split()
98 ppargs += (d.getVar("DTC_PPFLAGS") or "").split()
99 for i in includes:
100 ppargs.append("-I{0}".format(i))
101 ppargs += ["-o", "{0}.pp".format(dts), dtspath]
102 bb.note("Running {0}".format(" ".join(ppargs)))
103 subprocess.run(ppargs, check = True)
104
105 # determine if the file is an overlay or not (using the preprocessed file)
106 isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))
107
108 # compile
109 dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split()
110 if isoverlay:
111 dtcargs += (d.getVar("DTC_OFLAGS") or "").split()
112 else:
113 dtcargs += (d.getVar("DTC_BFLAGS") or "").split()
114 for i in includes:
115 dtcargs += ["-i", i]
116 dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
117 dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
118 bb.note("Running {0}".format(" ".join(dtcargs)))
119 subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
120
121python devicetree_do_compile() {
122 includes = expand_includes("DT_INCLUDE", d)
123 listpath = d.getVar("DT_FILES_PATH")
124 for dts in os.listdir(listpath):
125 dtspath = os.path.join(listpath, dts)
126 try:
127 if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
128 continue # skip non-.dts files and non-overlay files
129 except:
130 continue # skip if can't determine if overlay
131 devicetree_compile(dtspath, includes, d)
132}
133
134devicetree_do_install() {
135 for DTB_FILE in `ls *.dtb *.dtbo`; do
136 install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE}
137 done
138}
139
140devicetree_do_deploy() {
141 for DTB_FILE in `ls *.dtb *.dtbo`; do
142 install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE}
143 done
144}
145addtask deploy before do_build after do_install
146
147EXPORT_FUNCTIONS do_compile do_install do_deploy
148
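A rough sketch of the command lines devicetree_compile() above ends up running for a hypothetical my-board.dts, assuming the default flag values and using plain cpp as a stand-in for whatever BUILD_CPP expands to:

    # Hypothetical invocation for a non-overlay my-board.dts with the default
    # DTC_PPFLAGS, DTC_FLAGS and DTC_BFLAGS values above.
    includes = ["/path/to/recipe-sysroot/usr/src/kernel/scripts/dtc/include-prefixes"]

    pp_cmd = ["cpp", "-nostdinc", "-undef", "-D__DTS__", "-x", "assembler-with-cpp"]
    pp_cmd += ["-I{0}".format(i) for i in includes]
    pp_cmd += ["-o", "my-board.dts.pp", "my-board.dts"]

    dtc_cmd = ["dtc", "-R", "8", "-b", "0"]          # DTC_FLAGS
    dtc_cmd += ["-p", "0x3000", "-@"]                # DTC_BFLAGS (non-overlay case)
    for i in includes:
        dtc_cmd += ["-i", i]
    dtc_cmd += ["-o", "my-board.dtb", "-I", "dts", "-O", "dtb", "my-board.dts.pp"]

    print(" ".join(pp_cmd))
    print(" ".join(dtc_cmd))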
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
deleted file mode 100644
index fdf7dc100f..0000000000
--- a/meta/classes/devshell.bbclass
+++ /dev/null
@@ -1,155 +0,0 @@
1inherit terminal
2
3DEVSHELL = "${SHELL}"
4
5python do_devshell () {
6 if d.getVarFlag("do_devshell", "manualfakeroot"):
7 d.prependVar("DEVSHELL", "pseudo ")
8 fakeenv = d.getVar("FAKEROOTENV").split()
9 for f in fakeenv:
10 k = f.split("=")
11 d.setVar(k[0], k[1])
12 d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
13 d.delVarFlag("do_devshell", "fakeroot")
14
15 oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
16}
17
18addtask devshell after do_patch do_prepare_recipe_sysroot
19
20# The directory that the terminal starts in
21DEVSHELL_STARTDIR ?= "${S}"
22do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
23do_devshell[nostamp] = "1"
24
25# devshell and fakeroot/pseudo need careful handling since only the final
26# command should run under fakeroot emulation, any X connection should
27# be done as the normal user. We therefore carefully construct the environment
28# manually
29python () {
30 if d.getVarFlag("do_devshell", "fakeroot"):
31 # We need to signal our code that we want fakeroot however we
32 # can't manipulate the environment and variables here yet (see YOCTO #4795)
33 d.setVarFlag("do_devshell", "manualfakeroot", "1")
34 d.delVarFlag("do_devshell", "fakeroot")
35}
36
37def devpyshell(d):
38
39 import code
40 import select
41 import signal
42 import termios
43
44 m, s = os.openpty()
45 sname = os.ttyname(s)
46
47 def noechoicanon(fd):
48 old = termios.tcgetattr(fd)
49 old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
50 # &~ termios.ISIG
51 termios.tcsetattr(fd, termios.TCSADRAIN, old)
52
53 # No echo or buffering over the pty
54 noechoicanon(s)
55
56 pid = os.fork()
57 if pid:
58 os.close(m)
59 oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
60 os._exit(0)
61 else:
62 os.close(s)
63
64 os.dup2(m, sys.stdin.fileno())
65 os.dup2(m, sys.stdout.fileno())
66 os.dup2(m, sys.stderr.fileno())
67
68 bb.utils.nonblockingfd(sys.stdout)
69 bb.utils.nonblockingfd(sys.stderr)
70 bb.utils.nonblockingfd(sys.stdin)
71
72 _context = {
73 "os": os,
74 "bb": bb,
75 "time": time,
76 "d": d,
77 }
78
79 ps1 = "pydevshell> "
80 ps2 = "... "
81 buf = []
82 more = False
83
84 i = code.InteractiveInterpreter(locals=_context)
85 print("OE PyShell (PN = %s)\n" % d.getVar("PN"))
86
87 def prompt(more):
88 if more:
89 prompt = ps2
90 else:
91 prompt = ps1
92 sys.stdout.write(prompt)
93 sys.stdout.flush()
94
95 # Restore Ctrl+C since bitbake masks this
96 def signal_handler(signal, frame):
97 raise KeyboardInterrupt
98 signal.signal(signal.SIGINT, signal_handler)
99
100 child = None
101
102 prompt(more)
103 while True:
104 try:
105 try:
106 (r, _, _) = select.select([sys.stdin], [], [], 1)
107 if not r:
108 continue
109 line = sys.stdin.readline().strip()
110 if not line:
111 prompt(more)
112 continue
113 except EOFError as e:
114 sys.stdout.write("\n")
115 sys.stdout.flush()
116 except (OSError, IOError) as e:
117 if e.errno == 11:
118 continue
119 if e.errno == 5:
120 return
121 raise
122 else:
123 if not child:
124 child = int(line)
125 continue
126 buf.append(line)
127 source = "\n".join(buf)
128 more = i.runsource(source, "<pyshell>")
129 if not more:
130 buf = []
131 prompt(more)
132 except KeyboardInterrupt:
133 i.write("\nKeyboardInterrupt\n")
134 buf = []
135 more = False
136 prompt(more)
137 except SystemExit:
138 # Easiest way to ensure everything exits
139 os.kill(child, signal.SIGTERM)
140 break
141
142python do_devpyshell() {
143 import signal
144
145 try:
146 devpyshell(d)
147 except SystemExit:
148 # Stop the SIGTERM above causing an error exit code
149 return
150 finally:
151 return
152}
153addtask devpyshell after do_patch
154
155do_devpyshell[nostamp] = "1"
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
index 41900e651f..4158c20c7e 100644
--- a/meta/classes/devtool-source.bbclass
+++ b/meta/classes/devtool-source.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Development tool - source extraction helper class 7# Development tool - source extraction helper class
2# 8#
3# NOTE: this class is intended for use by devtool and should not be 9# NOTE: this class is intended for use by devtool and should not be
@@ -226,6 +232,9 @@ python devtool_post_patch() {
226 bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir) 232 bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
227 bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir) 233 bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
228 bb.process.run('git tag -f devtool-patched', cwd=srcsubdir) 234 bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
235 if os.path.exists(os.path.join(srcsubdir, '.gitmodules')):
236 bb.process.run('git submodule foreach --recursive "git tag -f devtool-patched"', cwd=srcsubdir)
237
229} 238}
230 239
231python devtool_post_configure() { 240python devtool_post_configure() {
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
deleted file mode 100644
index 7780c5482c..0000000000
--- a/meta/classes/devupstream.bbclass
+++ /dev/null
@@ -1,48 +0,0 @@
1# Class for use in BBCLASSEXTEND to make it easier to have a single recipe that
2# can build both stable tarballs and snapshots from upstream source
3# repositories.
4#
5# Usage:
6# BBCLASSEXTEND = "devupstream:target"
7# SRC_URI_class-devupstream = "git://git.example.com/example"
8# SRCREV_class-devupstream = "abcdef"
9#
10# If the first entry in SRC_URI is a git: URL then S is rewritten to
11# WORKDIR/git.
12#
13# There are a few caveats that remain to be solved:
14# - You can't build native or nativesdk recipes using for example
15# devupstream:native, you can only build target recipes.
16# - If the fetcher requires native tools (such as subversion-native) then
17# bitbake won't be able to add them automatically.
18
19CLASSOVERRIDE .= ":class-devupstream"
20
21python devupstream_virtclass_handler () {
22 # Do nothing if this is inherited, as it's for BBCLASSEXTEND
23 if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
24 bb.error("Don't inherit devupstream, use BBCLASSEXTEND")
25 return
26
27 variant = d.getVar("BBEXTENDVARIANT")
28 if variant not in ("target"):
29 bb.error("Pass the variant when using devupstream, for example devupstream:target")
30 return
31
32    # Development releases are never preferred by default
33 d.setVar("DEFAULT_PREFERENCE", "-1")
34
35 uri = bb.fetch2.URI(d.getVar("SRC_URI").split()[0])
36
37 if uri.scheme == "git":
38 d.setVar("S", "${WORKDIR}/git")
39
40 # Modify the PV if the recipe hasn't already overridden it
41 pv = d.getVar("PV")
42 proto_marker = "+" + uri.scheme
43 if proto_marker not in pv:
44 d.setVar("PV", pv + proto_marker + "${SRCPV}")
45}
46
47addhandler devupstream_virtclass_handler
48devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
deleted file mode 100644
index 8124a8ca27..0000000000
--- a/meta/classes/distro_features_check.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
1# Temporarily provide fallback to the old name of the class
2
3python __anonymous() {
4 bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
5}
6
7inherit features_check
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
index 9f4db0d771..8d9d7cda7d 100644
--- a/meta/classes/distrooverrides.bbclass
+++ b/meta/classes/distrooverrides.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Turns certain DISTRO_FEATURES into overrides with the same 7# Turns certain DISTRO_FEATURES into overrides with the same
2# name plus a df- prefix. Ensures that these special 8# name plus a df- prefix. Ensures that these special
3# distro features remain set also for native and nativesdk 9# distro features remain set also for native and nativesdk
@@ -6,7 +12,7 @@
6# This makes it simpler to write .bbappends that only change the 12# This makes it simpler to write .bbappends that only change the
7# task signatures of the recipe if the change is really enabled, 13# task signatures of the recipe if the change is really enabled,
8# for example with: 14# for example with:
9# do_install_append_df-my-feature () { ... } 15# do_install:append:df-my-feature () { ... }
10# where "my-feature" is a DISTRO_FEATURE. 16# where "my-feature" is a DISTRO_FEATURE.
11# 17#
12# The class is meant to be used in a layer.conf or distro 18# The class is meant to be used in a layer.conf or distro
@@ -22,8 +28,8 @@ DISTRO_FEATURES_OVERRIDES ?= ""
22DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \ 28DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
23Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES." 29Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
24 30
25DISTRO_FEATURES_FILTER_NATIVE_append = " ${DISTRO_FEATURES_OVERRIDES}" 31DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
26DISTRO_FEATURES_FILTER_NATIVESDK_append = " ${DISTRO_FEATURES_OVERRIDES}" 32DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
27 33
28# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task 34# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
29# signature because of this line, then the task dependency on 35# signature because of this line, then the task dependency on
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
deleted file mode 100644
index 43a38e5a3a..0000000000
--- a/meta/classes/distutils-common-base.bbclass
+++ /dev/null
@@ -1,25 +0,0 @@
1export STAGING_INCDIR
2export STAGING_LIBDIR
3
4# LDSHARED is the ld *command* used to create shared library
5export LDSHARED = "${CCLD} -shared"
6# LDXXSHARED is the ld *command* used to create shared library of C++
7# objects
8export LDCXXSHARED = "${CXX} -shared"
9# CCSHARED are the C *flags* used to create objects to go into a shared
10# library (module)
11export CCSHARED = "-fPIC -DPIC"
12# LINKFORSHARED are the flags passed to the $(CC) command that links
13# the python executable
14export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
15
16FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
17
18FILES_${PN}-staticdev += "\
19 ${PYTHON_SITEPACKAGES_DIR}/*.a \
20"
21FILES_${PN}-dev += "\
22 ${datadir}/pkgconfig \
23 ${libdir}/pkgconfig \
24 ${PYTHON_SITEPACKAGES_DIR}/*.la \
25"
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
deleted file mode 100644
index 302ee8c82c..0000000000
--- a/meta/classes/distutils3-base.bbclass
+++ /dev/null
@@ -1,6 +0,0 @@
1DEPENDS_append_class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
2DEPENDS_append_class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
3RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
4
5inherit distutils-common-base python3native python3targetconfig
6
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
deleted file mode 100644
index a916a8000c..0000000000
--- a/meta/classes/distutils3.bbclass
+++ /dev/null
@@ -1,67 +0,0 @@
1inherit distutils3-base
2
3B = "${WORKDIR}/build"
4distutils_do_configure[cleandirs] = "${B}"
5
6DISTUTILS_BUILD_ARGS ?= ""
7DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
8 --prefix=${prefix} \
9 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
10 --install-data=${datadir}"
11
12DISTUTILS_PYTHON = "python3"
13DISTUTILS_PYTHON_class-native = "nativepython3"
14
15DISTUTILS_SETUP_PATH ?= "${S}"
16
17distutils3_do_configure() {
18 :
19}
20
21distutils3_do_compile() {
22 cd ${DISTUTILS_SETUP_PATH}
23 NO_FETCH_BUILD=1 \
24 STAGING_INCDIR=${STAGING_INCDIR} \
25 STAGING_LIBDIR=${STAGING_LIBDIR} \
26 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
27 build --build-base=${B} ${DISTUTILS_BUILD_ARGS} || \
28 bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
29}
30distutils3_do_compile[vardepsexclude] = "MACHINE"
31
32distutils3_do_install() {
33 cd ${DISTUTILS_SETUP_PATH}
34 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
35 STAGING_INCDIR=${STAGING_INCDIR} \
36 STAGING_LIBDIR=${STAGING_LIBDIR} \
37 PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
38 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
39 build --build-base=${B} install --skip-build ${DISTUTILS_INSTALL_ARGS} || \
40 bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
41
42 # support filenames with *spaces*
43 find ${D} -name "*.py" -exec grep -q ${D} {} \; \
44 -exec sed -i -e s:${D}::g {} \;
45
46 for i in ${D}${bindir}/* ${D}${sbindir}/*; do
47 if [ -f "$i" ]; then
48 sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${DISTUTILS_PYTHON}:g $i
49 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
50 fi
51 done
52
53 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
54
55 #
56 # FIXME: Bandaid against wrong datadir computation
57 #
58 if [ -e ${D}${datadir}/share ]; then
59 mv -f ${D}${datadir}/share/* ${D}${datadir}/
60 rmdir ${D}${datadir}/share
61 fi
62}
63distutils3_do_install[vardepsexclude] = "MACHINE"
64
65EXPORT_FUNCTIONS do_configure do_compile do_install
66
67export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/dos2unix.bbclass b/meta/classes/dos2unix.bbclass
deleted file mode 100644
index 3fc17e2196..0000000000
--- a/meta/classes/dos2unix.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
1# Class for use to convert all CRLF line terminators to LF
2# provided that some projects are being developed/maintained
3# on Windows so they have different line terminators(CRLF) vs
4# on Linux(LF), which can cause annoying patching errors during
5# git push/checkout processes.
6
7do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot"
8
9# Convert CRLF line terminators to LF
10do_convert_crlf_to_lf () {
11 find ${S} -type f -exec dos2unix {} \;
12}
13
14addtask convert_crlf_to_lf after do_unpack before do_patch
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index c7b2bf2f49..70e27a8d35 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -2,7 +2,8 @@
2# Author: Richard Purdie 2# Author: Richard Purdie
3# Some code and influence taken from srctree.bbclass: 3# Some code and influence taken from srctree.bbclass:
4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com> 4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
5# Released under the MIT license (see COPYING.MIT for the terms) 5#
6# SPDX-License-Identifier: MIT
6# 7#
7# externalsrc.bbclass enables use of an existing source tree, usually external to 8# externalsrc.bbclass enables use of an existing source tree, usually external to
8# the build system to build a piece of software rather than the usual fetch/unpack/patch 9# the build system to build a piece of software rather than the usual fetch/unpack/patch
@@ -13,7 +14,7 @@
13# called "myrecipe" you would do: 14# called "myrecipe" you would do:
14# 15#
15# INHERIT += "externalsrc" 16# INHERIT += "externalsrc"
16# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree" 17# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
17# 18#
18# In order to make this class work for both target and native versions (or with 19# In order to make this class work for both target and native versions (or with
19# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate 20# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
@@ -21,7 +22,7 @@
21# the default, but the build directory can be set to the source directory if 22# the default, but the build directory can be set to the source directory if
22# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.: 23# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
23# 24#
24# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree" 25# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
25# 26#
26 27
27SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch" 28SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
@@ -45,11 +46,11 @@ python () {
45 if bpn == d.getVar('PN') or not classextend: 46 if bpn == d.getVar('PN') or not classextend:
46 if (externalsrc or 47 if (externalsrc or
47 ('native' in classextend and 48 ('native' in classextend and
48 d.getVar('EXTERNALSRC_pn-%s-native' % bpn)) or 49 d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
49 ('nativesdk' in classextend and 50 ('nativesdk' in classextend and
50 d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn)) or 51 d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
51 ('cross' in classextend and 52 ('cross' in classextend and
52 d.getVar('EXTERNALSRC_pn-%s-cross' % bpn))): 53 d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
53 d.setVar('BB_DONT_CACHE', '1') 54 d.setVar('BB_DONT_CACHE', '1')
54 55
55 if externalsrc: 56 if externalsrc:
@@ -60,22 +61,21 @@ python () {
60 if externalsrcbuild: 61 if externalsrcbuild:
61 d.setVar('B', externalsrcbuild) 62 d.setVar('B', externalsrcbuild)
62 else: 63 else:
63 d.setVar('B', '${WORKDIR}/${BPN}-${PV}/') 64 d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
64 65
66 bb.fetch.get_hashvalue(d)
65 local_srcuri = [] 67 local_srcuri = []
66 fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d) 68 fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
67 for url in fetch.urls: 69 for url in fetch.urls:
68 url_data = fetch.ud[url] 70 url_data = fetch.ud[url]
69 parm = url_data.parm 71 parm = url_data.parm
70 if (url_data.type == 'file' or 72 if url_data.type in ['file', 'npmsw', 'crate'] or parm.get('type') in ['kmeta', 'git-dependency']:
71 url_data.type == 'npmsw' or
72 'type' in parm and parm['type'] == 'kmeta'):
73 local_srcuri.append(url) 73 local_srcuri.append(url)
74 74
75 d.setVar('SRC_URI', ' '.join(local_srcuri)) 75 d.setVar('SRC_URI', ' '.join(local_srcuri))
76 76
77 # Dummy value because the default function can't be called with blank SRC_URI 77 # sstate is never going to work for external source trees, disable it
78 d.setVar('SRCPV', '999') 78 d.setVar('SSTATE_SKIP_CREATION', '1')
79 79
80 if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking': 80 if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
81 d.setVar('CONFIGUREOPT_DEPTRACK', '') 81 d.setVar('CONFIGUREOPT_DEPTRACK', '')
@@ -83,32 +83,42 @@ python () {
83 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys()) 83 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
84 84
85 for task in tasks: 85 for task in tasks:
86 if task.endswith("_setscene"): 86 if os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
87 # sstate is never going to work for external source trees, disable it
88 bb.build.deltask(task, d)
89 elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
90 # Since configure will likely touch ${S}, ensure only we lock so one task has access at a time 87 # Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
91 d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock") 88 d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
92 89
93 # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean) 90 for v in d.keys():
94 cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(task, 'cleandirs', False) or '') 91 cleandirs = d.getVarFlag(v, "cleandirs", False)
95 setvalue = False 92 if cleandirs:
96 for cleandir in cleandirs[:]: 93 # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
97 if oe.path.is_path_parent(externalsrc, d.expand(cleandir)): 94 cleandirs = oe.recipeutils.split_var_value(cleandirs)
98 cleandirs.remove(cleandir) 95 setvalue = False
99 setvalue = True 96 for cleandir in cleandirs[:]:
100 if setvalue: 97 if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
101 d.setVarFlag(task, 'cleandirs', ' '.join(cleandirs)) 98 cleandirs.remove(cleandir)
99 setvalue = True
100 if setvalue:
101 d.setVarFlag(v, 'cleandirs', ' '.join(cleandirs))
102 102
103 fetch_tasks = ['do_fetch', 'do_unpack'] 103 fetch_tasks = ['do_fetch', 'do_unpack']
104 # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one 104 # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
105 # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string 105 # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
106 d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack']) 106 d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
107 d.setVarFlag('do_populate_lic', 'deps', (d.getVarFlag('do_populate_lic', 'deps', False) or []) + ['do_unpack'])
107 108
108 for task in d.getVar("SRCTREECOVEREDTASKS").split(): 109 for task in d.getVar("SRCTREECOVEREDTASKS").split():
109 if local_srcuri and task in fetch_tasks: 110 if local_srcuri and task in fetch_tasks:
110 continue 111 continue
111 bb.build.deltask(task, d) 112 bb.build.deltask(task, d)
113 if task == 'do_unpack':
114 # The reproducible build create_source_date_epoch_stamp function must
115 # be run after the source is available and before the
116 # do_deploy_source_date_epoch task. In the normal case, it's attached
117 # to do_unpack as a postfuncs, but since we removed do_unpack (above)
118 # we need to move the function elsewhere. The easiest thing to do is
119 # move it into the prefuncs of the do_deploy_source_date_epoch task.
120 # This is safe, as externalsrc runs with the source already unpacked.
121 d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
112 122
113 d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ") 123 d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
114 d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ") 124 d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
@@ -116,6 +126,9 @@ python () {
116 d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}') 126 d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
117 d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}') 127 d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
118 128
129 d.appendVarFlag('do_compile', 'prefuncs', ' fetcher_hashes_dummyfunc')
130 d.appendVarFlag('do_configure', 'prefuncs', ' fetcher_hashes_dummyfunc')
131
119 # We don't want the workdir to go away 132 # We don't want the workdir to go away
120 d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN')) 133 d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
121 134
@@ -199,8 +212,8 @@ def srctree_hash_files(d, srcdir=None):
199 try: 212 try:
200 git_dir = os.path.join(s_dir, 213 git_dir = os.path.join(s_dir,
201 subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip()) 214 subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
202 top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], 215 top_git_dir = os.path.join(d.getVar("TOPDIR"),
203 stderr=subprocess.DEVNULL).decode("utf-8").rstrip()) 216 subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
204 if git_dir == top_git_dir: 217 if git_dir == top_git_dir:
205 git_dir = None 218 git_dir = None
206 except subprocess.CalledProcessError: 219 except subprocess.CalledProcessError:
@@ -217,14 +230,16 @@ def srctree_hash_files(d, srcdir=None):
217 env['GIT_INDEX_FILE'] = tmp_index.name 230 env['GIT_INDEX_FILE'] = tmp_index.name
218 subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env) 231 subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
219 git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8") 232 git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
220 submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8") 233 if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
221 for line in submodule_helper.splitlines(): 234 submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
222 module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1]) 235 for line in submodule_helper.splitlines():
223 proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) 236 module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
224 proc.communicate() 237 if os.path.isdir(module_dir):
225 proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) 238 proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
226 stdout, _ = proc.communicate() 239 proc.communicate()
227 git_sha1 += stdout.decode("utf-8") 240 proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
241 stdout, _ = proc.communicate()
242 git_sha1 += stdout.decode("utf-8")
228 sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest() 243 sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
229 with open(oe_hash_file, 'w') as fobj: 244 with open(oe_hash_file, 'w') as fobj:
230 fobj.write(sha1) 245 fobj.write(sha1)
@@ -238,6 +253,8 @@ def srctree_configure_hash_files(d):
238 Get the list of files that should trigger do_configure to re-execute, 253 Get the list of files that should trigger do_configure to re-execute,
239 based on the value of CONFIGURE_FILES 254 based on the value of CONFIGURE_FILES
240 """ 255 """
256 import fnmatch
257
241 in_files = (d.getVar('CONFIGURE_FILES') or '').split() 258 in_files = (d.getVar('CONFIGURE_FILES') or '').split()
242 out_items = [] 259 out_items = []
243 search_files = [] 260 search_files = []
@@ -249,8 +266,8 @@ def srctree_configure_hash_files(d):
249 if search_files: 266 if search_files:
250 s_dir = d.getVar('EXTERNALSRC') 267 s_dir = d.getVar('EXTERNALSRC')
251 for root, _, files in os.walk(s_dir): 268 for root, _, files in os.walk(s_dir):
252 for f in files: 269 for p in search_files:
253 if f in search_files: 270 for f in fnmatch.filter(files, p):
254 out_items.append('%s:True' % os.path.join(root, f)) 271 out_items.append('%s:True' % os.path.join(root, f))
255 return ' '.join(out_items) 272 return ' '.join(out_items)
256 273
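A minimal illustration of the matching change above: CONFIGURE_FILES entries are now treated as shell-style patterns via fnmatch rather than exact filenames (file names here are hypothetical):

    import fnmatch

    # Hypothetical CONFIGURE_FILES patterns and directory contents.
    search_files = ["configure.ac", "*.in"]
    files = ["configure.ac", "Makefile.in", "config.h.in", "README"]

    matched = []
    for p in search_files:
        # fnmatch.filter() applies shell-style wildcards, so "*.in" now matches
        # Makefile.in and config.h.in, which a plain membership test would miss.
        matched.extend(fnmatch.filter(files, p))

    print(matched)   # ['configure.ac', 'Makefile.in', 'config.h.in']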
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
index 90811bfe2a..c825c06df9 100644
--- a/meta/classes/extrausers.bbclass
+++ b/meta/classes/extrausers.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# This bbclass is used for image level user/group configuration. 7# This bbclass is used for image level user/group configuration.
2# Inherit this class if you want to make EXTRA_USERS_PARAMS effective. 8# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
3 9
@@ -14,10 +20,10 @@
14 20
15inherit useradd_base 21inherit useradd_base
16 22
17PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}" 23PACKAGE_INSTALL:append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
18 24
19# Image level user / group settings 25# Image level user / group settings
20ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;" 26ROOTFS_POSTPROCESS_COMMAND:append = " set_user_group"
21 27
22# Image level user / group settings 28# Image level user / group settings
23set_user_group () { 29set_user_group () {
diff --git a/meta/classes/features_check.bbclass b/meta/classes/features_check.bbclass
deleted file mode 100644
index b3c8047861..0000000000
--- a/meta/classes/features_check.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
1# Allow checking of required and conflicting features
2#
3# xxx = [DISTRO,MACHINE,COMBINED]
4#
5# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
6# in xxx_FEATURES.
7# REQUIRED_xxx_FEATURES: ensure every item on this list is included
8# in xxx_FEATURES.
9# CONFLICT_xxx_FEATURES: ensure no item in this list is included in
10# xxx_FEATURES.
11#
12# Copyright 2019 (C) Texas Instruments Inc.
13# Copyright 2013 (C) O.S. Systems Software LTDA.
14
15python () {
16 if d.getVar('PARSE_ALL_RECIPES', False):
17 return
18
19 unused = True
20
21 for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
22 if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and \
23 d.overridedata.get('ANY_OF_' + kind + '_FEATURES') is None and \
24 d.getVar('REQUIRED_' + kind + '_FEATURES') is None and \
25 d.overridedata.get('REQUIRED_' + kind + '_FEATURES') is None and \
26 d.getVar('CONFLICT_' + kind + '_FEATURES') is None and \
27 d.overridedata.get('CONFLICT_' + kind + '_FEATURES') is None:
28 continue
29
30 unused = False
31
32 # Assume at least one var is set.
33 features = set((d.getVar(kind + '_FEATURES') or '').split())
34
35 any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split())
36 if any_of_features:
37 if set.isdisjoint(any_of_features, features):
38 raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES"
39 % (' '.join(any_of_features), kind))
40
41 required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split())
42 if required_features:
43 missing = set.difference(required_features, features)
44 if missing:
45 raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)"
46 % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind))
47
48 conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split())
49 if conflict_features:
50 conflicts = set.intersection(conflict_features, features)
51 if conflicts:
52 raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)"
53 % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind))
54
55 if unused:
56 bb.warn("Recipe inherits features_check but doesn't use it")
57}
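A standalone sketch of the three checks implemented above, using hypothetical feature sets:

    # Hypothetical DISTRO_FEATURES and per-recipe constraints.
    features = {"wayland", "systemd", "ipv6"}
    any_of   = {"x11", "wayland"}    # at least one must be present
    required = {"systemd", "pam"}    # every entry must be present
    conflict = {"x11"}               # none of these may be present

    if any_of and any_of.isdisjoint(features):
        print("skip: one of '%s' needed" % " ".join(sorted(any_of)))
    missing = required - features
    if missing:
        print("skip: missing required feature(s) '%s'" % " ".join(sorted(missing)))
    clashes = conflict & features
    if clashes:
        print("skip: conflicting feature(s) '%s'" % " ".join(sorted(clashes)))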
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
deleted file mode 100644
index 624a420a0d..0000000000
--- a/meta/classes/fontcache.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for font
3# packages.
4#
5
6PACKAGE_WRITE_DEPS += "qemu-native"
7inherit qemu
8
9FONT_PACKAGES ??= "${PN}"
10FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
11FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
12FONTCONFIG_CACHE_PARAMS ?= "-v"
13# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues,
14# something has to be set, because qemuwrapper is using this variable after -E
15# multiple variables aren't allowed because for qemu they are separated
16# by comma and in -n "$D" case they should be separated by space
17FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
18fontcache_common() {
19if [ -n "$D" ] ; then
20 $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
21 'bindir="${bindir}"' \
22 'libdir="${libdir}"' \
23 'libexecdir="${libexecdir}"' \
24 'base_libdir="${base_libdir}"' \
25 'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
26 'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
27 'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"'
28else
29 ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS}
30fi
31}
32
33python () {
34 font_pkgs = d.getVar('FONT_PACKAGES').split()
35 deps = d.getVar("FONT_EXTRA_RDEPENDS")
36
37 for pkg in font_pkgs:
38 if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
39}
40
41python add_fontcache_postinsts() {
42 for pkg in d.getVar('FONT_PACKAGES').split():
43 bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
44 postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
45 if not postinst:
46 postinst = '#!/bin/sh\n'
47 postinst += d.getVar('fontcache_common')
48 d.setVar('pkg_postinst_%s' % pkg, postinst)
49
50 postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
51 if not postrm:
52 postrm = '#!/bin/sh\n'
53 postrm += d.getVar('fontcache_common')
54 d.setVar('pkg_postrm_%s' % pkg, postrm)
55}
56
57PACKAGEFUNCS =+ "add_fontcache_postinsts"
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
deleted file mode 100644
index 9b53dfba7a..0000000000
--- a/meta/classes/fs-uuid.bbclass
+++ /dev/null
@@ -1,24 +0,0 @@
1# Extract UUID from ${ROOTFS}, which must have been built
2# by the time that this function gets called. Only works
3# on ext file systems and depends on tune2fs.
4def get_rootfs_uuid(d):
5 import subprocess
6 rootfs = d.getVar('ROOTFS')
7 output = subprocess.check_output(['tune2fs', '-l', rootfs])
8 for line in output.split('\n'):
9 if line.startswith('Filesystem UUID:'):
10 uuid = line.split()[-1]
11 bb.note('UUID of %s: %s' % (rootfs, uuid))
12 return uuid
13 bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
14
15# Replace the special <<uuid-of-rootfs>> inside a string (like the
16# root= APPEND string in a syslinux.cfg or systemd-boot entry) with the
17# actual UUID of the rootfs. Does nothing if the special string
18# is not used.
19def replace_rootfs_uuid(d, string):
20 UUID_PLACEHOLDER = '<<uuid-of-rootfs>>'
21 if UUID_PLACEHOLDER in string:
22 uuid = get_rootfs_uuid(d)
23 string = string.replace(UUID_PLACEHOLDER, uuid)
24 return string
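A minimal sketch of the UUID extraction above over captured tune2fs output (the values are hypothetical, and the output is assumed to be already decoded to text):

    # Hypothetical `tune2fs -l` output lines, already decoded to text.
    output_lines = [
        "tune2fs 1.46.5 (30-Dec-2021)",
        "Filesystem volume name:   <none>",
        "Filesystem UUID:          3d1bff5e-47a9-4a2e-9a6e-3c2b2f0a1c11",
        "Filesystem magic number:  0xEF53",
    ]

    uuid = None
    for line in output_lines:
        if line.startswith('Filesystem UUID:'):
            uuid = line.split()[-1]
            break

    print(uuid)   # 3d1bff5e-47a9-4a2e-9a6e-3c2b2f0a1c11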
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
deleted file mode 100644
index 3e3c509d5f..0000000000
--- a/meta/classes/gconf.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
1DEPENDS += "gconf"
2PACKAGE_WRITE_DEPS += "gconf-native"
3
4# These are for when gconftool is used natively and the prefix isn't necessarily
5# the sysroot. TODO: replicate the postinst logic for -native packages going
6# into sysroot as they won't be running their own install-time schema
7# registration (disabled below) nor the postinst script (as they don't happen).
8export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
9export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
10
11# Disable install-time schema registration as we're a packaging system so this
12# happens in the postinst script, not at install time. Set both the configure
13# script option and the traditional environment variable just to make sure.
14EXTRA_OECONF += "--disable-schemas-install"
15export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
16
17gconf_postinst() {
18if [ "x$D" != "x" ]; then
19 export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
20else
21 export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
22fi
23
24SCHEMA_LOCATION=$D/etc/gconf/schemas
25for SCHEMA in ${SCHEMA_FILES}; do
26 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
27 HOME=$D/root gconftool-2 \
28 --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
29 fi
30done
31}
32
33gconf_prerm() {
34SCHEMA_LOCATION=/etc/gconf/schemas
35for SCHEMA in ${SCHEMA_FILES}; do
36 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
37 HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
38 gconftool-2 \
39 --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
40 fi
41done
42}
43
44python populate_packages_append () {
45 import re
46 packages = d.getVar('PACKAGES').split()
47 pkgdest = d.getVar('PKGDEST')
48
49 for pkg in packages:
50 schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
51 schemas = []
52 schema_re = re.compile(r".*\.schemas$")
53 if os.path.exists(schema_dir):
54 for f in os.listdir(schema_dir):
55 if schema_re.match(f):
56 schemas.append(f)
57 if schemas != []:
58 bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
59 d.setVar('SCHEMA_FILES', " ".join(schemas))
60 postinst = d.getVar('pkg_postinst_%s' % pkg)
61 if not postinst:
62 postinst = '#!/bin/sh\n'
63 postinst += d.getVar('gconf_postinst')
64 d.setVar('pkg_postinst_%s' % pkg, postinst)
65 prerm = d.getVar('pkg_prerm_%s' % pkg)
66 if not prerm:
67 prerm = '#!/bin/sh\n'
68 prerm += d.getVar('gconf_prerm')
69 d.setVar('pkg_prerm_%s' % pkg, prerm)
70 d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
71}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
deleted file mode 100644
index be2ef3b311..0000000000
--- a/meta/classes/gettext.bbclass
+++ /dev/null
@@ -1,22 +0,0 @@
1def gettext_dependencies(d):
2 if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
3 return ""
4 if d.getVar('USE_NLS') == 'no':
5 return "gettext-minimal-native"
6 return "gettext-native"
7
8def gettext_oeconf(d):
9 if d.getVar('USE_NLS') == 'no':
10 return '--disable-nls'
11 # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
12 if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
13 return '--disable-nls'
14 return "--enable-nls"
15
16BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
17EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
18
19# Without this, msgfmt from gettext-native will not find ITS files
20# provided by target recipes (for example, polkit.its).
21GETTEXTDATADIRS_append_class-target = ":${STAGING_DATADIR}/gettext"
22export GETTEXTDATADIRS
diff --git a/meta/classes/gio-module-cache.bbclass b/meta/classes/gio-module-cache.bbclass
deleted file mode 100644
index e429bd3197..0000000000
--- a/meta/classes/gio-module-cache.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
1PACKAGE_WRITE_DEPS += "qemu-native"
2inherit qemu
3
4GIO_MODULE_PACKAGES ??= "${PN}"
5
6gio_module_cache_common() {
7if [ "x$D" != "x" ]; then
8 $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \
9 mlprefix=${MLPREFIX} \
10 binprefix=${MLPREFIX} \
11 libdir=${libdir} \
12 libexecdir=${libexecdir} \
13 base_libdir=${base_libdir} \
14 bindir=${bindir}
15else
16 ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/
17fi
18}
19
20python populate_packages_append () {
21 packages = d.getVar('GIO_MODULE_PACKAGES').split()
22
23 for pkg in packages:
24 bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
25
26 postinst = d.getVar('pkg_postinst_%s' % pkg)
27 if not postinst:
28 postinst = '#!/bin/sh\n'
29 postinst += d.getVar('gio_module_cache_common')
30 d.setVar('pkg_postinst_%s' % pkg, postinst)
31
32 postrm = d.getVar('pkg_postrm_%s' % pkg)
33 if not postrm:
34 postrm = '#!/bin/sh\n'
35 postrm += d.getVar('gio_module_cache_common')
36 d.setVar('pkg_postrm_%s' % pkg, postrm)
37}
38
diff --git a/meta/classes/glide.bbclass b/meta/classes/glide.bbclass
deleted file mode 100644
index db421745bd..0000000000
--- a/meta/classes/glide.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
1# Handle Glide Vendor Package Management use
2#
3# Copyright 2018 (C) O.S. Systems Software LTDA.
4
5DEPENDS_append = " glide-native"
6
7do_compile_prepend() {
8 ( cd ${B}/src/${GO_IMPORT} && glide install )
9}
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
deleted file mode 100644
index efcb6caae1..0000000000
--- a/meta/classes/gnomebase.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1def gnome_verdir(v):
2 return oe.utils.trim_version(v, 2)
3
4GNOME_COMPRESS_TYPE ?= "xz"
5SECTION ?= "x11/gnome"
6GNOMEBN ?= "${BPN}"
7SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
8
9FILES_${PN} += "${datadir}/application-registry \
10 ${datadir}/mime-info \
11 ${datadir}/mime/packages \
12 ${datadir}/mime/application \
13 ${datadir}/gnome-2.0 \
14 ${datadir}/polkit* \
15 ${datadir}/GConf \
16 ${datadir}/glib-2.0/schemas \
17 ${datadir}/appdata \
18 ${datadir}/icons \
19"
20
21FILES_${PN}-doc += "${datadir}/devhelp"
22
23GNOMEBASEBUILDCLASS ??= "autotools"
24inherit ${GNOMEBASEBUILDCLASS} pkgconfig
25
26do_install_append() {
27 rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
28 rm -rf ${D}${localstatedir}/scrollkeeper/*
29 rm -f ${D}${datadir}/applications/*.cache
30}
diff --git a/meta/classes/go-mod.bbclass b/meta/classes/go-mod.bbclass
deleted file mode 100644
index cabb04d0ec..0000000000
--- a/meta/classes/go-mod.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
1# Handle Go Modules support
2#
3# When using Go Modules, the current working directory MUST be at or below
4# the location of the 'go.mod' file when the go tool is used, and there is no
5# way to tell it to look elsewhere. It will automatically look upwards for the
6# file, but not downwards.
7#
8# To support this use case, we provide the `GO_WORKDIR` variable, which defaults
9# to `GO_IMPORT` but allows for easy override.
10#
11# Copyright 2020 (C) O.S. Systems Software LTDA.
12
13# The '-modcacherw' option ensures we have write access to the cached objects so
14# we avoid errors during clean task as well as when removing the TMPDIR.
15GOBUILDFLAGS_append = " -modcacherw"
16
17inherit go
18
19GO_WORKDIR ?= "${GO_IMPORT}"
20do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
diff --git a/meta/classes/go-ptest.bbclass b/meta/classes/go-ptest.bbclass
deleted file mode 100644
index e230a80587..0000000000
--- a/meta/classes/go-ptest.bbclass
+++ /dev/null
@@ -1,54 +0,0 @@
1inherit go ptest
2
3do_compile_ptest_base() {
4 export TMPDIR="${GOTMPDIR}"
5 rm -f ${B}/.go_compiled_tests.list
6 go_list_package_tests | while read pkg; do
7 cd ${B}/src/$pkg
8 ${GO} test ${GOPTESTBUILDFLAGS} $pkg
9 find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
10 sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
11 done
12 do_compile_ptest
13}
14
15do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
16
17go_make_ptest_wrapper() {
18 cat >${D}${PTEST_PATH}/run-ptest <<EOF
19#!/bin/sh
20RC=0
21run_test() (
22 cd "\$1"
23 ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
24 exit \$?)
25EOF
26
27}
28
29do_install_ptest_base() {
30 test -f "${B}/.go_compiled_tests.list" || exit 0
31 install -d ${D}${PTEST_PATH}
32 go_stage_testdata
33 go_make_ptest_wrapper
34 havetests=""
35 while read test; do
36 testdir=`dirname $test`
37 testprog=`basename $test`
38 install -d ${D}${PTEST_PATH}/$testdir
39 install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
40 echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
41 havetests="yes"
42 done < ${B}/.go_compiled_tests.list
43 if [ -n "$havetests" ]; then
44 echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
45 chmod +x ${D}${PTEST_PATH}/run-ptest
46 else
47 rm -rf ${D}${PTEST_PATH}
48 fi
49 do_install_ptest
50 chown -R root:root ${D}${PTEST_PATH}
51}
52
53INSANE_SKIP_${PN}-ptest += "ldflags"
54
diff --git a/meta/classes/go-vendor.bbclass b/meta/classes/go-vendor.bbclass
new file mode 100644
index 0000000000..1bbb99ac79
--- /dev/null
+++ b/meta/classes/go-vendor.bbclass
@@ -0,0 +1,211 @@
1#
2# Copyright 2023 (C) Weidmueller GmbH & Co KG
3# Author: Lukas Funke <lukas.funke@weidmueller.com>
4#
5# Handle Go vendor support for offline builds
6#
7# When importing Go modules, Go downloads the imported modules using
8# a network (proxy) connection ahead of the compile stage. This contradicts
9# the yocto build concept of fetching every source ahead of build-time
10# and supporting offline builds.
11#
12# To support offline builds, we use Go 'vendoring': module dependencies are
13# downloaded during the fetch-phase and unpacked into the module's 'vendor'
14# folder. Additionally a manifest file is generated for the 'vendor' folder
15#
16
17inherit go-mod
18
19def go_src_uri(repo, version, path=None, subdir=None, \
20 vcs='git', replaces=None, pathmajor=None):
21
22 destsuffix = "git/src/import/vendor.fetch"
23 module_path = repo if not path else path
24
25 src_uri = "{}://{};name={}".format(vcs, repo, module_path.replace('/', '.'))
26 src_uri += ";destsuffix={}/{}@{}".format(destsuffix, repo, version)
27
28 if vcs == "git":
29 src_uri += ";nobranch=1;protocol=https"
30
31 src_uri += ";go_module_path={}".format(module_path)
32
33 if replaces:
34 src_uri += ";go_module_replacement={}".format(replaces)
35 if subdir:
36 src_uri += ";go_subdir={}".format(subdir)
37 if pathmajor:
38 src_uri += ";go_pathmajor={}".format(pathmajor)
39 src_uri += ";is_go_dependency=1"
40
41 return src_uri
42
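# For illustration, a recipe would typically add its module dependencies with
# this helper (hypothetical module path and version):
#
#   SRC_URI += "${@go_src_uri('github.com/foo/bar', 'v1.2.3')}"
#
# which expands to roughly:
#
#   git://github.com/foo/bar;name=github.com.foo.bar;destsuffix=git/src/import/vendor.fetch/github.com/foo/bar@v1.2.3;nobranch=1;protocol=https;go_module_path=github.com/foo/bar;is_go_dependency=1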
43python do_vendor_unlink() {
44 go_import = d.getVar('GO_IMPORT')
45 source_dir = d.getVar('S')
46 linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
47
48 os.unlink(linkname)
49}
50
51addtask vendor_unlink before do_package after do_install
52
53python do_go_vendor() {
54 import shutil
55
56 src_uri = (d.getVar('SRC_URI') or "").split()
57
58 if not src_uri:
59 bb.fatal("SRC_URI is empty")
60
61 default_destsuffix = "git/src/import/vendor.fetch"
62 fetcher = bb.fetch2.Fetch(src_uri, d)
63 go_import = d.getVar('GO_IMPORT')
64 source_dir = d.getVar('S')
65
66 linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
67 vendor_dir = os.path.join(source_dir, *['src', 'import', 'vendor'])
68 import_dir = os.path.join(source_dir, *['src', 'import', 'vendor.fetch'])
69
70 if os.path.exists(vendor_dir):
71 # Nothing to do except re-establish link to actual vendor folder
72 if not os.path.exists(linkname):
73 os.symlink(vendor_dir, linkname)
74 return
75
76 bb.utils.mkdirhier(vendor_dir)
77
78 modules = {}
79
80 for url in fetcher.urls:
81 srcuri = fetcher.ud[url].host + fetcher.ud[url].path
82
83 # Skip non Go module src uris
84 if not fetcher.ud[url].parm.get('is_go_dependency'):
85 continue
86
87 destsuffix = fetcher.ud[url].parm.get('destsuffix')
88 # We derive the module repo / version in the following manner (example):
89 #
90 # destsuffix = git/src/import/vendor.fetch/github.com/foo/bar@v1.2.3
91 # p = github.com/foo/bar@v1.2.3
92 # repo = github.com/foo/bar
93 # version = v1.2.3
94
95 p = destsuffix[len(default_destsuffix)+1:]
96 repo, version = p.split('@')
97
98 module_path = fetcher.ud[url].parm.get('go_module_path')
99
100 subdir = fetcher.ud[url].parm.get('go_subdir')
101 subdir = None if not subdir else subdir
102
103 pathMajor = fetcher.ud[url].parm.get('go_pathmajor')
104 pathMajor = None if not pathMajor else pathMajor.strip('/')
105
106 if not (repo, version) in modules:
107 modules[(repo, version)] = {
108 "repo_path": os.path.join(import_dir, p),
109 "module_path": module_path,
110 "subdir": subdir,
111 "pathMajor": pathMajor }
112
113 for module_key, module in modules.items():
114
115 # only take the version which is explicitly listed
116 # as a dependency in the go.mod
117 module_path = module['module_path']
118 rootdir = module['repo_path']
119 subdir = module['subdir']
120 pathMajor = module['pathMajor']
121
122 src = rootdir
123
124 if subdir:
125 src = os.path.join(rootdir, subdir)
126
127 # If the module is released at major version 2 or higher, the module
128 # path must end with a major version suffix like /v2.
129 # This may or may not be part of the subdirectory name
130 #
131 # https://go.dev/ref/mod#modules-overview
132 if pathMajor:
133 tmp = os.path.join(src, pathMajor)
134 # source directory including major version path may or may not exist
135 if os.path.exists(tmp):
136 src = tmp
137
138 dst = os.path.join(vendor_dir, module_path)
139
140 bb.debug(1, "cp %s --> %s" % (src, dst))
141 shutil.copytree(src, dst, symlinks=True, dirs_exist_ok=True, \
142 ignore=shutil.ignore_patterns(".git", \
143 "vendor", \
144 "*._test.go"))
145
146 # If the root directory has a LICENSE file but the subdirectory does not,
147 # we copy the root license to the sub module, since the license
148 # applies to all modules in the repository.
149 # See https://go.dev/ref/mod#vcs-license
150 if subdir:
151 rootdirLicense = os.path.join(rootdir, "LICENSE")
152 subdirLicense = os.path.join(src, "LICENSE")
153
154 if not os.path.exists(subdirLicense) and \
155 os.path.exists(rootdirLicense):
156 shutil.copy2(rootdirLicense, subdirLicense)
157
158 # Copy vendor manifest
159 modules_txt_src = os.path.join(d.getVar('WORKDIR'), "modules.txt")
160 bb.debug(1, "cp %s --> %s" % (modules_txt_src, vendor_dir))
161 shutil.copy2(modules_txt_src, vendor_dir)
162
163 # Clean up vendor dir
164 # We only require the modules in the modules_txt file
165 fetched_paths = set([os.path.relpath(x[0], vendor_dir) for x in os.walk(vendor_dir)])
166
167 # Remove toplevel dir
168 fetched_paths.remove('.')
169
170 vendored_paths = set()
171 replaced_paths = dict()
172 with open(modules_txt_src) as f:
173 for line in f:
174 if not line.startswith("#"):
175 line = line.strip()
176 vendored_paths.add(line)
177
178 # Add toplevel dirs into vendored dir, as we want to keep them
179 topdir = os.path.dirname(line)
180 while len(topdir):
181 if not topdir in vendored_paths:
182 vendored_paths.add(topdir)
183
184 topdir = os.path.dirname(topdir)
185 else:
186 replaced_module = line.split("=>")
187 if len(replaced_module) > 1:
188 # This module has been replaced; use a local path.
189 # We parse lines matching the pattern "# module-name [module-version] => local-path".
190 actual_path = replaced_module[1].strip()
191 vendored_name = replaced_module[0].split()[1]
192 bb.debug(1, "added vendored name %s for actual path %s" % (vendored_name, actual_path))
193 replaced_paths[vendored_name] = actual_path
194
195 for path in fetched_paths:
196 if path not in vendored_paths:
197 realpath = os.path.join(vendor_dir, path)
198 if os.path.exists(realpath):
199 shutil.rmtree(realpath)
200
201 for vendored_name, replaced_path in replaced_paths.items():
202 symlink_target = os.path.join(source_dir, *['src', go_import, replaced_path])
203 symlink_name = os.path.join(vendor_dir, vendored_name)
204 bb.debug(1, "vendored name %s, symlink name %s" % (vendored_name, symlink_name))
205 os.symlink(symlink_target, symlink_name)
206
207 # Create a symlink to the actual directory
208 os.symlink(vendor_dir, linkname)
209}
210
211addtask go_vendor before do_patch after do_unpack
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
deleted file mode 100644
index 77ec98dd51..0000000000
--- a/meta/classes/go.bbclass
+++ /dev/null
@@ -1,156 +0,0 @@
1inherit goarch
2
3GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
4
5GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
6GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
7GOROOT = "${STAGING_LIBDIR}/go"
8export GOROOT
9export GOROOT_FINAL = "${libdir}/go"
10export GOCACHE = "${B}/.cache"
11
12export GOARCH = "${TARGET_GOARCH}"
13export GOOS = "${TARGET_GOOS}"
14export GOHOSTARCH="${BUILD_GOARCH}"
15export GOHOSTOS="${BUILD_GOOS}"
16
17GOARM[export] = "0"
18GOARM_arm_class-target = "${TARGET_GOARM}"
19GOARM_arm_class-target[export] = "1"
20
21GO386[export] = "0"
22GO386_x86_class-target = "${TARGET_GO386}"
23GO386_x86_class-target[export] = "1"
24
25GOMIPS[export] = "0"
26GOMIPS_mips_class-target = "${TARGET_GOMIPS}"
27GOMIPS_mips_class-target[export] = "1"
28
29DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
30DEPENDS_GOLANG_class-native = "go-native"
31DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
32
33DEPENDS_append = " ${DEPENDS_GOLANG}"
34
35GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
36GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
37GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
38GO_RPATH_class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
39GO_RPATH_LINK_class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
40GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
41GO_LINKMODE ?= ""
42GO_LINKMODE_class-nativesdk = "--linkmode=external"
43GO_LINKMODE_class-native = "--linkmode=external"
44GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
45export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
46export GOPATH_OMIT_IN_ACTIONID ?= "1"
47export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
48export GOPTESTFLAGS ?= ""
49GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
50
51export GO = "${HOST_PREFIX}go"
52GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
53GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
54export GOTOOLDIR
55
56export CGO_ENABLED ?= "1"
57export CGO_CFLAGS ?= "${CFLAGS}"
58export CGO_CPPFLAGS ?= "${CPPFLAGS}"
59export CGO_CXXFLAGS ?= "${CXXFLAGS}"
60export CGO_LDFLAGS ?= "${LDFLAGS}"
61
62GO_INSTALL ?= "${GO_IMPORT}/..."
63GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
64
65B = "${WORKDIR}/build"
66export GOPATH = "${B}"
67export GOTMPDIR ?= "${WORKDIR}/go-tmp"
68GOTMPDIR[vardepvalue] = ""
69
70python go_do_unpack() {
71 src_uri = (d.getVar('SRC_URI') or "").split()
72 if len(src_uri) == 0:
73 return
74
75 fetcher = bb.fetch2.Fetch(src_uri, d)
76 for url in fetcher.urls:
77 if fetcher.ud[url].type == 'git':
78 if fetcher.ud[url].parm.get('destsuffix') is None:
79 s_dirname = os.path.basename(d.getVar('S'))
80 fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
81 fetcher.unpack(d.getVar('WORKDIR'))
82}
83
84go_list_packages() {
85 ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
86 egrep -v '${GO_INSTALL_FILTEROUT}'
87}
88
89go_list_package_tests() {
90 ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
91 grep -v '\[\]$' | \
92 egrep -v '${GO_INSTALL_FILTEROUT}' | \
93 awk '{ print $1 }'
94}
95
96go_do_configure() {
97 ln -snf ${S}/src ${B}/
98}
99do_configure[dirs] =+ "${GOTMPDIR}"
100
101go_do_compile() {
102 export TMPDIR="${GOTMPDIR}"
103 if [ -n "${GO_INSTALL}" ]; then
104 if [ -n "${GO_LINKSHARED}" ]; then
105 ${GO} install ${GOBUILDFLAGS} `go_list_packages`
106 rm -rf ${B}/bin
107 fi
108 ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
109 fi
110}
111do_compile[dirs] =+ "${GOTMPDIR}"
112do_compile[cleandirs] = "${B}/bin ${B}/pkg"
113
114go_do_install() {
115 install -d ${D}${libdir}/go/src/${GO_IMPORT}
116 tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
117 tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
118 tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
119 tar -C ${D}${libdir}/go --no-same-owner -xf -
120
121 if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
122 install -d ${D}${bindir}
123 install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
124 fi
125}
126
127go_stage_testdata() {
128 oldwd="$PWD"
129 cd ${S}/src
130 find ${GO_IMPORT} -depth -type d -name testdata | while read d; do
131 if echo "$d" | grep -q '/vendor/'; then
132 continue
133 fi
134 parent=`dirname $d`
135 install -d ${D}${PTEST_PATH}/$parent
136 cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/
137 done
138 cd "$oldwd"
139}
140
141EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
142
143FILES_${PN}-dev = "${libdir}/go/src"
144FILES_${PN}-staticdev = "${libdir}/go/pkg"
145
146INSANE_SKIP_${PN} += "ldflags"
147
148# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
149# doesn't support -buildmode=pie, so skip the QA checking for mips/rv32 and its
150# variants.
151python() {
152 if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
153 d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
154 else:
155 d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
156}
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
deleted file mode 100644
index e4e0ca37be..0000000000
--- a/meta/classes/goarch.bbclass
+++ /dev/null
@@ -1,116 +0,0 @@
1BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}"
2BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
3BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
4HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
5HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
6HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
7HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
8HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
9HOST_GOARM_class-native = "7"
10HOST_GO386_class-native = "sse2"
11HOST_GOMIPS_class-native = "hardfloat"
12HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
13TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
14TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
15TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
16TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
17TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
18TARGET_GOARM_class-native = "7"
19TARGET_GO386_class-native = "sse2"
20TARGET_GOMIPS_class-native = "hardfloat"
21TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
22GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
23
24# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
25# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
26BASE_GOARM = ''
27BASE_GOARM_armv7ve = '7'
28BASE_GOARM_armv7a = '7'
29BASE_GOARM_armv6 = '6'
30BASE_GOARM_armv5 = '5'
31
32# Go supports dynamic linking on a limited set of architectures.
33# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
34GO_DYNLINK = ""
35GO_DYNLINK_arm = "1"
36GO_DYNLINK_aarch64 = "1"
37GO_DYNLINK_x86 = "1"
38GO_DYNLINK_x86-64 = "1"
39GO_DYNLINK_powerpc64 = "1"
40GO_DYNLINK_powerpc64le = "1"
41GO_DYNLINK_class-native = ""
42GO_DYNLINK_class-nativesdk = ""
43
44# define here because everybody inherits this class
45#
46COMPATIBLE_HOST_linux-gnux32 = "null"
47COMPATIBLE_HOST_linux-muslx32 = "null"
48COMPATIBLE_HOST_powerpc = "null"
49COMPATIBLE_HOST_powerpc64 = "null"
50COMPATIBLE_HOST_mipsarchn32 = "null"
51
52ARM_INSTRUCTION_SET_armv4 = "arm"
53ARM_INSTRUCTION_SET_armv5 = "arm"
54ARM_INSTRUCTION_SET_armv6 = "arm"
55
56TUNE_CCARGS_remove = "-march=mips32r2"
57SECURITY_NOPIE_CFLAGS ??= ""
58
59# go can't be built with ccache:
60# gcc: fatal error: no input files
61CCACHE_DISABLE ?= "1"
62
63def go_map_arch(a, d):
64 import re
65 if re.match('i.86', a):
66 return '386'
67 elif a == 'x86_64':
68 return 'amd64'
69 elif re.match('arm.*', a):
70 return 'arm'
71 elif re.match('aarch64.*', a):
72 return 'arm64'
73 elif re.match('mips64el.*', a):
74 return 'mips64le'
75 elif re.match('mips64.*', a):
76 return 'mips64'
77 elif a == 'mips':
78 return 'mips'
79 elif a == 'mipsel':
80 return 'mipsle'
81 elif re.match('p(pc|owerpc)(64le)', a):
82 return 'ppc64le'
83 elif re.match('p(pc|owerpc)(64)', a):
84 return 'ppc64'
85 elif a == 'riscv64':
86 return 'riscv64'
87 else:
88 raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
89
90def go_map_arm(a, d):
91 if a.startswith("arm"):
92 return d.getVar('BASE_GOARM')
93 return ''
94
95def go_map_386(a, f, d):
96 import re
97 if re.match('i.86', a):
98 if ('core2' in f) or ('corei7' in f):
99 return 'sse2'
100 else:
101 return 'softfloat'
102 return ''
103
104def go_map_mips(a, f, d):
105 import re
106 if a == 'mips' or a == 'mipsel':
107 if 'fpu-hard' in f:
108 return 'hardfloat'
109 else:
110 return 'softfloat'
111 return ''
112
113def go_map_os(o, d):
114 if o.startswith('linux'):
115 return 'linux'
116 return o
diff --git a/meta/classes/gobject-introspection-data.bbclass b/meta/classes/gobject-introspection-data.bbclass
deleted file mode 100644
index 2ef684626a..0000000000
--- a/meta/classes/gobject-introspection-data.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
1# This variable is set to True if gobject-introspection-data is in
2# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
3#
4# It should be used in recipes to determine whether introspection data should be built,
5# so that qemu use can be avoided when necessary.
6GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
7 bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
diff --git a/meta/classes/gobject-introspection.bbclass b/meta/classes/gobject-introspection.bbclass
deleted file mode 100644
index 504f75e28d..0000000000
--- a/meta/classes/gobject-introspection.bbclass
+++ /dev/null
@@ -1,53 +0,0 @@
1# Inherit this class in recipes to enable building their introspection files
2
3# python3native is inherited to prevent introspection tools being run with
4# host's python 3 (they need to be run with native python 3)
5#
6# This also sets up autoconf-based recipes to build introspection data (or not),
7# depending on distro and machine features (see gobject-introspection-data class).
8inherit python3native gobject-introspection-data
9
10# meson: default option name to enable/disable introspection. This matches most
11# project's configuration. In doubts - check meson_options.txt in project's
12# source path.
13GIR_MESON_OPTION ?= 'introspection'
14GIR_MESON_ENABLE_FLAG ?= 'true'
15GIR_MESON_DISABLE_FLAG ?= 'false'
16
17# Auto enable/disable based on GI_DATA_ENABLED
18EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
19EXTRA_OEMESON_prepend_class-target = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
20
21# When building native recipes, disable introspection, as it is not necessary,
22# pulls in additional dependencies, and makes build times longer
23EXTRA_OECONF_prepend_class-native = "--disable-introspection "
24EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
25EXTRA_OEMESON_prepend_class-native = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
26EXTRA_OEMESON_prepend_class-nativesdk = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
27
28# Generating introspection data depends on a combination of native and target
29# introspection tools, and qemu to run the target tools.
30DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native prelink-native"
31
32# Even though introspection is disabled on -native, gobject-introspection package is still
33# needed for m4 macros.
34DEPENDS_append_class-native = " gobject-introspection-native"
35DEPENDS_append_class-nativesdk = " gobject-introspection-native"
36
37# This is used by introspection tools to find .gir includes
38export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
39
40do_configure_prepend_class-target () {
41 # introspection.m4 pre-packaged with upstream tarballs does not yet
42 # have our fixes
43 mkdir -p ${S}/m4
44 cp ${STAGING_DIR_TARGET}/${datadir}/aclocal/introspection.m4 ${S}/m4
45}
46
47# .typelib files are needed at runtime and so they go to the main package (so
48# they'll be together with libraries they support).
49FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib"
50
51# .gir files go to dev package, as they're needed for developing (but not for
52# running) things that depends on introspection.
53FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
diff --git a/meta/classes/godep.bbclass b/meta/classes/godep.bbclass
deleted file mode 100644
index c82401c313..0000000000
--- a/meta/classes/godep.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
1DEPENDS_append = " go-dep-native"
2
3do_compile_prepend() {
4 rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.toml
5 rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.lock
6 ( cd ${WORKDIR}/build/src/${GO_IMPORT} && dep init && dep ensure )
7}
8
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
deleted file mode 100644
index ea21b3de3d..0000000000
--- a/meta/classes/grub-efi-cfg.bbclass
+++ /dev/null
@@ -1,123 +0,0 @@
1# grub-efi.bbclass
2# Copyright (c) 2011, Intel Corporation.
3# All rights reserved.
4#
5# Released under the MIT license (see packages/COPYING)
6
7# Provide grub-efi specific functions for building bootable images.
8
9# External variables
10# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
11# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
12# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
13# ${LABELS} - a list of targets for the automatic config
14# ${APPEND} - an override list of append strings for each label
15# ${GRUB_OPTS} - additional options to add to the config, ';' delimited # (optional)
16# ${GRUB_TIMEOUT} - timeout before executing the deault label (optional)
17# ${GRUB_ROOT} - grub's root device.
18
19GRUB_SERIAL ?= "console=ttyS0,115200"
20GRUB_CFG_VM = "${S}/grub_vm.cfg"
21GRUB_CFG_LIVE = "${S}/grub_live.cfg"
22GRUB_TIMEOUT ?= "10"
23#FIXME: build this from the machine config
24GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
25
26GRUB_ROOT ?= "${ROOT}"
27APPEND ?= ""
28
29# Uses MACHINE specific KERNEL_IMAGETYPE
30PACKAGE_ARCH = "${MACHINE_ARCH}"
31
32# Need UUID utility code.
33inherit fs-uuid
34
35python build_efi_cfg() {
36 import sys
37
38 workdir = d.getVar('WORKDIR')
39 if not workdir:
40 bb.error("WORKDIR not defined, unable to package")
41 return
42
43 gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
44
45 labels = d.getVar('LABELS')
46 if not labels:
47 bb.debug(1, "LABELS not defined, nothing to do")
48 return
49
50 if labels == []:
51 bb.debug(1, "No labels, nothing to do")
52 return
53
54 cfile = d.getVar('GRUB_CFG')
55 if not cfile:
56 bb.fatal('Unable to read GRUB_CFG')
57
58 try:
59 cfgfile = open(cfile, 'w')
60 except OSError:
61 bb.fatal('Unable to open %s' % cfile)
62
63 cfgfile.write('# Automatically created by OE\n')
64
65 opts = d.getVar('GRUB_OPTS')
66 if opts:
67 for opt in opts.split(';'):
68 cfgfile.write('%s\n' % opt)
69
70 cfgfile.write('default=%s\n' % (labels.split()[0]))
71
72 timeout = d.getVar('GRUB_TIMEOUT')
73 if timeout:
74 cfgfile.write('timeout=%s\n' % timeout)
75 else:
76 cfgfile.write('timeout=50\n')
77
78 root = d.getVar('GRUB_ROOT')
79 if not root:
80 bb.fatal('GRUB_ROOT not defined')
81
82 if gfxserial == "1":
83 btypes = [ [ " graphics console", "" ],
84 [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
85 else:
86 btypes = [ [ "", "" ] ]
87
88 for label in labels.split():
89 localdata = d.createCopy()
90
91 overrides = localdata.getVar('OVERRIDES')
92 if not overrides:
93 bb.fatal('OVERRIDES not defined')
94
95 localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)
96
97 for btype in btypes:
98 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
99 lb = label
100 if label == "install":
101 lb = "install-efi"
102 kernel = localdata.getVar('KERNEL_IMAGETYPE')
103 cfgfile.write('linux /%s LABEL=%s' % (kernel, lb))
104
105 cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
106
107 append = localdata.getVar('APPEND')
108 initrd = localdata.getVar('INITRD')
109
110 if append:
111 append = replace_rootfs_uuid(d, append)
112 cfgfile.write(' %s' % (append))
113
114 cfgfile.write(' %s' % btype[1])
115 cfgfile.write('\n')
116
117 if initrd:
118 cfgfile.write('initrd /initrd')
119 cfgfile.write('\n}\n')
120
121 cfgfile.close()
122}
123build_efi_cfg[vardepsexclude] += "OVERRIDES"
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
deleted file mode 100644
index 8fc6999e52..0000000000
--- a/meta/classes/grub-efi.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
1inherit grub-efi-cfg
2require conf/image-uefi.conf
3
4efi_populate() {
5 efi_populate_common "$1" grub-efi
6
7 install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
8}
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
deleted file mode 100644
index 33afc96a9c..0000000000
--- a/meta/classes/gsettings.bbclass
+++ /dev/null
@@ -1,42 +0,0 @@
1# A bbclass to handle installed GSettings (glib) schemas, updated the compiled
2# form on package install and remove.
3#
4# The compiled schemas are platform-agnostic, so we can depend on
5# glib-2.0-native for the native tool and run the postinst script when the
6# rootfs builds to save a little time on first boot.
7
8# TODO use a trigger so that this runs once per package operation run
9
10GSETTINGS_PACKAGE ?= "${PN}"
11
12python __anonymous() {
13 pkg = d.getVar("GSETTINGS_PACKAGE")
14 if pkg:
15 d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
16 d.appendVar("RDEPENDS_" + pkg, " ${MLPREFIX}glib-2.0-utils")
17 d.appendVar("FILES_" + pkg, " ${datadir}/glib-2.0/schemas")
18}
19
20gsettings_postinstrm () {
21 glib-compile-schemas $D${datadir}/glib-2.0/schemas
22}
23
24python populate_packages_append () {
25 pkg = d.getVar('GSETTINGS_PACKAGE')
26 if pkg:
27 bb.note("adding gsettings postinst scripts to %s" % pkg)
28
29 postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
30 if not postinst:
31 postinst = '#!/bin/sh\n'
32 postinst += d.getVar('gsettings_postinstrm')
33 d.setVar('pkg_postinst_%s' % pkg, postinst)
34
35 bb.note("adding gsettings postrm scripts to %s" % pkg)
36
37 postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
38 if not postrm:
39 postrm = '#!/bin/sh\n'
40 postrm += d.getVar('gsettings_postinstrm')
41 d.setVar('pkg_postrm_%s' % pkg, postrm)
42}
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
deleted file mode 100644
index ef99e63faf..0000000000
--- a/meta/classes/gtk-doc.bbclass
+++ /dev/null
@@ -1,83 +0,0 @@
1# Helper class to pull in the right gtk-doc dependencies and configure
2# gtk-doc to enable or disable documentation building (which requries the
3# use of usermode qemu).
4
5# This variable is set to True if api-documentation is in
6# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
7#
8# It should be used in recipes to determine whether gtk-doc based documentation should be built,
9# so that qemu use can be avoided when necessary.
10GTKDOC_ENABLED_class-native = "False"
11GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
12 bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
13
14# meson: default option name to enable/disable gtk-doc. This matches most
15# project's configuration. In doubts - check meson_options.txt in project's
16# source path.
17GTKDOC_MESON_OPTION ?= 'docs'
18GTKDOC_MESON_ENABLE_FLAG ?= 'true'
19GTKDOC_MESON_DISABLE_FLAG ?= 'false'
20
21# Auto enable/disable based on GTKDOC_ENABLED
22EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
23 '--disable-gtk-doc', d)} "
24EXTRA_OEMESON_prepend_class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
25
26# When building native recipes, disable gtkdoc, as it is not necessary,
27# pulls in additional dependencies, and makes build times longer
28EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
29EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
30EXTRA_OEMESON_prepend_class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
31EXTRA_OEMESON_prepend_class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
32
33# Even though gtkdoc is disabled on -native, gtk-doc package is still
34# needed for m4 macros.
35DEPENDS_append = " gtk-doc-native"
36
37# The documentation directory, where the infrastructure will be copied.
38# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
39GTKDOC_DOCDIR ?= "${S}"
40
41export STAGING_DIR_HOST
42
43inherit python3native pkgconfig qemu
44DEPENDS_append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
45
46do_configure_prepend () {
47 # Need to use ||true as this is only needed if configure.ac both exists
48 # and uses GTK_DOC_CHECK.
49 gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
50}
51
52do_compile_prepend_class-target () {
53 if [ ${GTKDOC_ENABLED} = True ]; then
54 # Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
55 # can run target helper binaries through that.
56 qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
57 cat > ${B}/gtkdoc-qemuwrapper << EOF
58#!/bin/sh
59# Use a modules directory which doesn't exist so we don't load random things
60# which may then get deleted (or their dependencies) and potentially segfault
61export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
62
63GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
64GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
65
66# meson sets this wrongly (only to libs in build-dir), qemu-wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
67unset LD_LIBRARY_PATH
68
69if [ -d ".libs" ]; then
70 $qemu_binary ".libs/\$@"
71else
72 $qemu_binary "\$@"
73fi
74
75if [ \$? -ne 0 ]; then
76 echo "If the above error message is about missing .so libraries, then setting up GIR_EXTRA_LIBS_PATH in the recipe should help."
77 echo "(typically like this: GIR_EXTRA_LIBS_PATH=\"$""{B}/something/.libs\" )"
78 exit 1
79fi
80EOF
81 chmod +x ${B}/gtkdoc-qemuwrapper
82 fi
83}
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
deleted file mode 100644
index 340a283851..0000000000
--- a/meta/classes/gtk-icon-cache.bbclass
+++ /dev/null
@@ -1,84 +0,0 @@
1FILES_${PN} += "${datadir}/icons/hicolor"
2
3#gtk+3 reqiure GTK3DISTROFEATURES, DEPENDS on it make all the
4#recipes inherit this class require GTK3DISTROFEATURES
5inherit features_check
6ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
7
8DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} \
9 ${@['gdk-pixbuf', '']['${BPN}' == 'gdk-pixbuf']} \
10 ${@['gtk+3', '']['${BPN}' == 'gtk+3']} \
11 gtk+3-native \
12"
13
14PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native"
15
16gtk_icon_cache_postinst() {
17if [ "x$D" != "x" ]; then
18 $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
19 mlprefix=${MLPREFIX} \
20 libdir_native=${libdir_native}
21else
22
23 # Update the pixbuf loaders in case they haven't been registered yet
24 ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
25
26 for icondir in /usr/share/icons/* ; do
27 if [ -d $icondir ] ; then
28 gtk-update-icon-cache -fqt $icondir
29 fi
30 done
31fi
32}
33
34gtk_icon_cache_postrm() {
35if [ "x$D" != "x" ]; then
36 $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
37 mlprefix=${MLPREFIX} \
38 libdir=${libdir}
39else
40 for icondir in /usr/share/icons/* ; do
41 if [ -d $icondir ] ; then
42 gtk-update-icon-cache -qt $icondir
43 fi
44 done
45fi
46}
47
48python populate_packages_append () {
49 packages = d.getVar('PACKAGES').split()
50 pkgdest = d.getVar('PKGDEST')
51
52 for pkg in packages:
53 icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir'))
54 if not os.path.exists(icon_dir):
55 continue
56
57 bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
58 rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
59 d.appendVar('RDEPENDS_%s' % pkg, rdepends)
60
61 #gtk_icon_cache_postinst depend on gdk-pixbuf and gtk+3
62 bb.note("adding gdk-pixbuf dependency to %s" % pkg)
63 rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
64 d.appendVar('RDEPENDS_%s' % pkg, rdepends)
65
66 bb.note("adding gtk+3 dependency to %s" % pkg)
67 rdepends = ' ' + d.getVar('MLPREFIX', False) + "gtk+3"
68 d.appendVar('RDEPENDS_%s' % pkg, rdepends)
69
70 bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
71
72 postinst = d.getVar('pkg_postinst_%s' % pkg)
73 if not postinst:
74 postinst = '#!/bin/sh\n'
75 postinst += d.getVar('gtk_icon_cache_postinst')
76 d.setVar('pkg_postinst_%s' % pkg, postinst)
77
78 postrm = d.getVar('pkg_postrm_%s' % pkg)
79 if not postrm:
80 postrm = '#!/bin/sh\n'
81 postrm += d.getVar('gtk_icon_cache_postrm')
82 d.setVar('pkg_postrm_%s' % pkg, postrm)
83}
84
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
deleted file mode 100644
index 8e783fb493..0000000000
--- a/meta/classes/gtk-immodules-cache.bbclass
+++ /dev/null
@@ -1,76 +0,0 @@
1# This class will update the inputmethod module cache for virtual keyboards
2#
3# Usage: Set GTKIMMODULES_PACKAGES to the packages that needs to update the inputmethod modules
4
5PACKAGE_WRITE_DEPS += "qemu-native"
6
7inherit qemu
8
9GTKIMMODULES_PACKAGES ?= "${PN}"
10
11gtk_immodule_cache_postinst() {
12if [ "x$D" != "x" ]; then
13 $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
14 mlprefix=${MLPREFIX} \
15 binprefix=${MLPREFIX} \
16 libdir=${libdir} \
17 libexecdir=${libexecdir} \
18 base_libdir=${base_libdir} \
19 bindir=${bindir}
20else
21 if [ ! -z `which gtk-query-immodules-2.0` ]; then
22 gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
23 fi
24 if [ ! -z `which gtk-query-immodules-3.0` ]; then
25 mkdir -p ${libdir}/gtk-3.0/3.0.0
26 gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
27 fi
28fi
29}
30
31gtk_immodule_cache_postrm() {
32if [ "x$D" != "x" ]; then
33 $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
34 mlprefix=${MLPREFIX} \
35 binprefix=${MLPREFIX} \
36 libdir=${libdir} \
37 libexecdir=${libexecdir} \
38 base_libdir=${base_libdir} \
39 bindir=${bindir}
40else
41 if [ ! -z `which gtk-query-immodules-2.0` ]; then
42 gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
43 fi
44 if [ ! -z `which gtk-query-immodules-3.0` ]; then
45 gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
46 fi
47fi
48}
49
50python populate_packages_append () {
51 gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
52
53 for pkg in gtkimmodules_pkgs:
54 bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
55
56 postinst = d.getVar('pkg_postinst_%s' % pkg)
57 if not postinst:
58 postinst = '#!/bin/sh\n'
59 postinst += d.getVar('gtk_immodule_cache_postinst')
60 d.setVar('pkg_postinst_%s' % pkg, postinst)
61
62 postrm = d.getVar('pkg_postrm_%s' % pkg)
63 if not postrm:
64 postrm = '#!/bin/sh\n'
65 postrm += d.getVar('gtk_immodule_cache_postrm')
66 d.setVar('pkg_postrm_%s' % pkg, postrm)
67}
68
69python __anonymous() {
70 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
71 gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
72 if not gtkimmodules_check:
73 bb_filename = d.getVar('FILE', False)
74 bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
75}
76
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index d095305ed8..159cae20f8 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -1,40 +1,45 @@
1# IceCream distributed compiling support 1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Icecream distributed compiling support
2# 8#
3# Stages directories with symlinks from gcc/g++ to icecc, for both 9# Stages directories with symlinks from gcc/g++ to icecc, for both
4# native and cross compilers. Depending on each configure or compile, 10# native and cross compilers. Depending on each configure or compile,
5# the directories are added at the head of the PATH list and ICECC_CXX 11# the directories are added at the head of the PATH list and ICECC_CXX
6# and ICEC_CC are set. 12# and ICECC_CC are set.
7# 13#
8# For the cross compiler, creates a tar.gz of our toolchain and sets 14# For the cross compiler, creates a tar.gz of our toolchain and sets
9# ICECC_VERSION accordingly. 15# ICECC_VERSION accordingly.
10# 16#
11# The class now handles all 3 different compile 'stages' (i.e native ,cross-kernel and target) creating the 17# The class now handles all 3 different compile 'stages' (i.e native ,cross-kernel and target) creating the
12# necessary environment tar.gz file to be used by the remote machines. 18# necessary environment tar.gz file to be used by the remote machines.
13# It also supports meta-toolchain generation 19# It also supports meta-toolchain generation.
14# 20#
15# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which' 21# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which'
16# but nothing is sure ;) 22# but nothing is sure. ;)
17# 23#
18# If ICECC_ENV_EXEC is set in local.conf, then it should point to the icecc-create-env script provided by the user 24# If ICECC_ENV_EXEC is set in local.conf, then it should point to the icecc-create-env script provided by the user
19# or the default one provided by icecc-create-env.bb will be used 25# or the default one provided by icecc-create-env_0.1.bb will be used.
20# (NOTE that this is a modified version of the script need it and *not the one that comes with icecc* 26# (NOTE that this is a modified version of the needed script and *not the one that comes with icecream*).
21# 27#
22# User can specify if specific packages or packages belonging to class should not use icecc to distribute 28# User can specify if specific recipes or recipes inheriting specific classes should not use icecc to distribute
23# compile jobs to remote machines, but handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL 29# compile jobs to remote machines, but handle them locally by defining ICECC_CLASS_DISABLE and ICECC_RECIPE_DISABLE
24# with the appropriate values in local.conf. In addition the user can force to enable icecc for packages 30# with the appropriate values in local.conf. In addition the user can force to enable icecc for recipes
25# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL. 31# which set an empty PARALLEL_MAKE variable by defining ICECC_RECIPE_ENABLE.
26# 32#
27######################################################################################### 33#########################################################################################
28#Error checking is kept to minimum so double check any parameters you pass to the class 34# Error checking is kept to minimum so double check any parameters you pass to the class
29########################################################################################### 35#########################################################################################
30 36
31BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL \ 37BB_BASEHASH_IGNORE_VARS += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_RECIPE_DISABLE \
32 ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC \ 38 ICECC_CLASS_DISABLE ICECC_RECIPE_ENABLE ICECC_PATH ICECC_ENV_EXEC \
33 ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \ 39 ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
34 ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \ 40 ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
35 ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \ 41 ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
36 ICECC_ENV_DEBUG ICECC_SYSTEM_PACKAGE_BL ICECC_SYSTEM_CLASS_BL \ 42 ICECC_ENV_DEBUG ICECC_REMOTE_CPP \
37 ICECC_REMOTE_CPP \
38 " 43 "
39 44
40ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env" 45ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
@@ -45,9 +50,9 @@ HOSTTOOLS_NONFATAL += "icecc patchelf"
45# invalidate the version on the compile nodes. Changing it will cause a new 50# invalidate the version on the compile nodes. Changing it will cause a new
46# environment to be created. 51# environment to be created.
47# 52#
48# A useful thing to do for testing Icecream changes locally is to add a 53# A useful thing to do for testing icecream changes locally is to add a
49# subversion in local.conf: 54# subversion in local.conf:
50# ICECC_ENV_VERSION_append = "-my-ver-1" 55# ICECC_ENV_VERSION:append = "-my-ver-1"
51ICECC_ENV_VERSION = "2" 56ICECC_ENV_VERSION = "2"
52 57
53# Default to disabling the caret workaround, If set to "1" in local.conf, icecc 58# Default to disabling the caret workaround, If set to "1" in local.conf, icecc
@@ -66,46 +71,46 @@ CXXFLAGS += "${ICECC_CFLAGS}"
66# Debug flags when generating environments 71# Debug flags when generating environments
67ICECC_ENV_DEBUG ??= "" 72ICECC_ENV_DEBUG ??= ""
68 73
69# "system" recipe blacklist contains a list of packages that can not distribute 74# Disable recipe list contains a list of recipes that can not distribute
70# compile tasks for one reason or the other. When adding new entry, please 75# compile tasks for one reason or the other. When adding a new entry, please
71# document why (how it failed) so that we can re-evaluate it later e.g. when 76# document why (how it failed) so that we can re-evaluate it later e.g. when
72# there is new version 77# there is a new version.
73# 78#
74# libgcc-initial - fails with CPP sanity check error if host sysroot contains 79# libgcc-initial - fails with CPP sanity check error if host sysroot contains
75# cross gcc built for another target tune/variant 80# cross gcc built for another target tune/variant.
76# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to 81# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to
77# pragma omp threadprivate(prng_state) 82# pragma omp threadprivate(prng_state).
78# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting 83# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting
79# inline assembly 84# inline assembly.
80# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL 85# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
81# prefix" error. 86# prefix" error.
82ICECC_SYSTEM_PACKAGE_BL += "\ 87ICECC_RECIPE_DISABLE += "\
83 libgcc-initial \ 88 libgcc-initial \
84 pixman \ 89 pixman \
85 systemtap \ 90 systemtap \
86 target-sdk-provides-dummy \ 91 target-sdk-provides-dummy \
87 " 92 "
88 93
89# "system" classes that should be blacklisted. When adding new entry, please 94# Classes that should not use icecc. When adding a new entry, please
90# document why (how it failed) so that we can re-evaluate it later 95# document why (how it failed) so that we can re-evaluate it later.
91# 96#
92# image - Image aren't compiling, but the testing framework for images captures 97# image - images aren't compiling, but the testing framework for images captures
93# PARALLEL_MAKE as part of the test environment. Many tests won't use 98# PARALLEL_MAKE as part of the test environment. Many tests won't use
94# icecream, but leaving the high level of parallelism can cause them to 99# icecream, but leaving the high level of parallelism can cause them to
95# consume an unnecessary amount of resources. 100# consume an unnecessary amount of resources.
96ICECC_SYSTEM_CLASS_BL += "\ 101ICECC_CLASS_DISABLE += "\
97 image \ 102 image \
98 " 103 "
99 104
100def icecc_dep_prepend(d): 105def get_icecc_dep(d):
101 # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not 106 # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
102 # we need that built is the responsibility of the patch function / class, not 107 # we need that built is the responsibility of the patch function / class, not
103 # the application. 108 # the application.
104 if not d.getVar('INHIBIT_DEFAULT_DEPS'): 109 if not d.getVar('INHIBIT_DEFAULT_DEPS'):
105 return "icecc-create-env-native" 110 return "icecc-create-env-native"
106 return "" 111 return ""
107 112
108DEPENDS_prepend = "${@icecc_dep_prepend(d)} " 113DEPENDS:prepend = "${@get_icecc_dep(d)} "
109 114
110get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC" 115get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
111def get_cross_kernel_cc(bb,d): 116def get_cross_kernel_cc(bb,d):
@@ -138,39 +143,31 @@ def use_icecc(bb,d):
138 if icecc_is_cross_canadian(bb, d): 143 if icecc_is_cross_canadian(bb, d):
139 return "no" 144 return "no"
140 145
141 if d.getVar('INHIBIT_DEFAULT_DEPS', False):
142 # We don't have a compiler, so no icecc
143 return "no"
144
145 pn = d.getVar('PN') 146 pn = d.getVar('PN')
146 bpn = d.getVar('BPN') 147 bpn = d.getVar('BPN')
147 148
148 # Blacklist/whitelist checks are made against BPN, because there is a good 149 # Enable/disable checks are made against BPN, because there is a good
149 # chance that if icecc should be skipped for a recipe, it should be skipped 150 # chance that if icecc should be skipped for a recipe, it should be skipped
150 # for all the variants of that recipe. PN is still checked in case a user 151 # for all the variants of that recipe. PN is still checked in case a user
151 # specified a more specific recipe. 152 # specified a more specific recipe.
152 check_pn = set([pn, bpn]) 153 check_pn = set([pn, bpn])
153 154
154 system_class_blacklist = (d.getVar('ICECC_SYSTEM_CLASS_BL') or "").split() 155 class_disable = (d.getVar('ICECC_CLASS_DISABLE') or "").split()
155 user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
156 package_class_blacklist = system_class_blacklist + user_class_blacklist
157 156
158 for black in package_class_blacklist: 157 for bbclass in class_disable:
159 if bb.data.inherits_class(black, d): 158 if bb.data.inherits_class(bbclass, d):
160 bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black)) 159 bb.debug(1, "%s: bbclass %s found in disable, disable icecc" % (pn, bbclass))
161 return "no" 160 return "no"
162 161
163 system_package_blacklist = (d.getVar('ICECC_SYSTEM_PACKAGE_BL') or "").split() 162 disabled_recipes = (d.getVar('ICECC_RECIPE_DISABLE') or "").split()
164 user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split() 163 enabled_recipes = (d.getVar('ICECC_RECIPE_ENABLE') or "").split()
165 user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
166 package_blacklist = system_package_blacklist + user_package_blacklist
167 164
168 if check_pn & set(package_blacklist): 165 if check_pn & set(disabled_recipes):
169 bb.debug(1, "%s: found in blacklist, disable icecc" % pn) 166 bb.debug(1, "%s: found in disable list, disable icecc" % pn)
170 return "no" 167 return "no"
171 168
172 if check_pn & set(user_package_whitelist): 169 if check_pn & set(enabled_recipes):
173 bb.debug(1, "%s: found in whitelist, enable icecc" % pn) 170 bb.debug(1, "%s: found in enabled recipes list, enable icecc" % pn)
174 return "yes" 171 return "yes"
175 172
176 if d.getVar('PARALLEL_MAKE') == "": 173 if d.getVar('PARALLEL_MAKE') == "":
@@ -262,7 +259,7 @@ def icecc_get_tool_link(tool, d):
262def icecc_get_path_tool(tool, d): 259def icecc_get_path_tool(tool, d):
263 # This is a little ugly, but we want to make sure we add an actual 260 # This is a little ugly, but we want to make sure we add an actual
264 # compiler to the toolchain, not ccache. Some distros (e.g. Fedora) 261 # compiler to the toolchain, not ccache. Some distros (e.g. Fedora)
265 # have ccache enabled by default using symlinks PATH, meaning ccache 262 # have ccache enabled by default using symlinks in PATH, meaning ccache
266 # would be found first when looking for the compiler. 263 # would be found first when looking for the compiler.
267 paths = os.getenv("PATH").split(':') 264 paths = os.getenv("PATH").split(':')
268 while True: 265 while True:
@@ -313,7 +310,7 @@ wait_for_file() {
313 local TIMEOUT=$2 310 local TIMEOUT=$2
314 until [ -f "$FILE_TO_TEST" ] 311 until [ -f "$FILE_TO_TEST" ]
315 do 312 do
316 TIME_ELAPSED=`expr $TIME_ELAPSED + 1` 313 TIME_ELAPSED=$(expr $TIME_ELAPSED + 1)
317 if [ $TIME_ELAPSED -gt $TIMEOUT ] 314 if [ $TIME_ELAPSED -gt $TIMEOUT ]
318 then 315 then
319 return 1 316 return 1
@@ -362,12 +359,12 @@ set_icecc_env() {
362 ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}" 359 ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
363 if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ] 360 if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
364 then 361 then
365 bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX" 362 bbnote "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
366 return 363 return
367 fi 364 fi
368 365
369 ICE_VERSION=`$ICECC_CC -dumpversion` 366 ICE_VERSION="$($ICECC_CC -dumpversion)"
370 ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"` 367 ICECC_VERSION=$(echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g")
371 if [ ! -x "${ICECC_ENV_EXEC}" ] 368 if [ ! -x "${ICECC_ENV_EXEC}" ]
372 then 369 then
373 bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC" 370 bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
@@ -383,7 +380,6 @@ set_icecc_env() {
383 fi 380 fi
384 for compiler in $compilers; do 381 for compiler in $compilers; do
385 ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler 382 ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
386 rm -f $ICE_PATH/$compiler
387 cat <<-__EOF__ > $ICE_PATH/$compiler 383 cat <<-__EOF__ > $ICE_PATH/$compiler
388 #!/bin/sh -e 384 #!/bin/sh -e
389 export ICECC_VERSION=$ICECC_VERSION 385 export ICECC_VERSION=$ICECC_VERSION
@@ -394,18 +390,18 @@ set_icecc_env() {
394 chmod 775 $ICE_PATH/$compiler 390 chmod 775 $ICE_PATH/$compiler
395 done 391 done
396 392
397 ICECC_AS="`${ICECC_CC} -print-prog-name=as`" 393 ICECC_AS="$(${ICECC_CC} -print-prog-name=as)"
398 # for target recipes should return something like: 394 # for target recipes should return something like:
399 # /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as 395 # /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
400 # and just "as" for native, if it returns "as" in current directory (for whatever reason) use "as" from PATH 396 # and just "as" for native, if it returns "as" in current directory (for whatever reason) use "as" from PATH
401 if [ "`dirname "${ICECC_AS}"`" = "." ] 397 if [ "$(dirname "${ICECC_AS}")" = "." ]
402 then 398 then
403 ICECC_AS="${ICECC_WHICH_AS}" 399 ICECC_AS="${ICECC_WHICH_AS}"
404 fi 400 fi
405 401
406 if [ ! -f "${ICECC_VERSION}.done" ] 402 if [ ! -f "${ICECC_VERSION}.done" ]
407 then 403 then
408 mkdir -p "`dirname "${ICECC_VERSION}"`" 404 mkdir -p "$(dirname "${ICECC_VERSION}")"
409 405
410 # the ICECC_VERSION generation step must be locked by a mutex 406 # the ICECC_VERSION generation step must be locked by a mutex
411 # in order to prevent race conditions 407 # in order to prevent race conditions
@@ -432,28 +428,34 @@ set_icecc_env() {
432 bbnote "Using icecc tarball: $ICECC_VERSION" 428 bbnote "Using icecc tarball: $ICECC_VERSION"
433} 429}
434 430
435do_configure_prepend() { 431do_configure:prepend() {
436 set_icecc_env 432 set_icecc_env
437} 433}
438 434
439do_compile_prepend() { 435do_compile:prepend() {
440 set_icecc_env 436 set_icecc_env
441} 437}
442 438
443do_compile_kernelmodules_prepend() { 439do_compile_kernelmodules:prepend() {
444 set_icecc_env 440 set_icecc_env
445} 441}
446 442
447do_install_prepend() { 443do_install:prepend() {
448 set_icecc_env 444 set_icecc_env
449} 445}
450 446
451# IceCream is not (currently) supported in the extensible SDK 447# Icecream is not (currently) supported in the extensible SDK
452ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain" 448ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
453ICECC_SDK_HOST_TASK_task-populate-sdk-ext = "" 449ICECC_SDK_HOST_TASK:task-populate-sdk-ext = ""
454 450
455# Don't include IceCream in uninative tarball 451# Don't include icecream in uninative tarball
456ICECC_SDK_HOST_TASK_pn-uninative-tarball = "" 452ICECC_SDK_HOST_TASK:pn-uninative-tarball = ""
457 453
458# Add the toolchain scripts to the SDK 454# Add the toolchain scripts to the SDK
459TOOLCHAIN_HOST_TASK_append = " ${ICECC_SDK_HOST_TASK}" 455TOOLCHAIN_HOST_TASK:append = " ${ICECC_SDK_HOST_TASK}"
456
457python () {
458 if d.getVar('ICECC_DISABLED') != "1":
459 for task in ['do_configure', 'do_compile', 'do_compile_kernelmodules', 'do_install']:
460 d.setVarFlag(task, 'network', '1')
461}
diff --git a/meta/classes/image-artifact-names.bbclass b/meta/classes/image-artifact-names.bbclass
deleted file mode 100644
index 3ac8dd731a..0000000000
--- a/meta/classes/image-artifact-names.bbclass
+++ /dev/null
@@ -1,15 +0,0 @@
1##################################################################
2# Specific image creation and rootfs population info.
3##################################################################
4
5IMAGE_BASENAME ?= "${PN}"
6IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
7IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME"
8IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
9IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
10
11# IMAGE_NAME is the base name for everything produced when building images.
12# The actual image that contains the rootfs has an additional suffix (.rootfs
13# by default) followed by additional suffices which describe the format (.ext4,
14# .ext4.xz, etc.).
15IMAGE_NAME_SUFFIX ??= ".rootfs"
diff --git a/meta/classes/image-buildinfo.bbclass b/meta/classes/image-buildinfo.bbclass
index 94c585d4cd..b83ce650ad 100644
--- a/meta/classes/image-buildinfo.bbclass
+++ b/meta/classes/image-buildinfo.bbclass
@@ -1,10 +1,10 @@
 #
-# Writes build information to target filesystem on /etc/build
+# Writes build information to target filesystem on /etc/buildinfo
 #
 # Copyright (C) 2014 Intel Corporation
 # Author: Alejandro Enedino Hernandez Samaniego <alejandro.hernandez@intel.com>
 #
-# Licensed under the MIT license, see COPYING.MIT for details
+# SPDX-License-Identifier: MIT
 #
 # Usage: add INHERIT += "image-buildinfo" to your conf file
 #
@@ -13,7 +13,8 @@
 IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
 
 # Desired location of the output file in the image.
-IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/build"
+IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/buildinfo"
+SDK_BUILDINFO_FILE ??= "/buildinfo"
 
 # From buildhistory.bbclass
 def image_buildinfo_outputvars(vars, d):
@@ -26,30 +27,10 @@ def image_buildinfo_outputvars(vars, d):
         ret += "%s = %s\n" % (var, value)
     return ret.rstrip('\n')
 
-# Gets git branch's status (clean or dirty)
-def get_layer_git_status(path):
-    import subprocess
-    try:
-        subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
-                                git diff --quiet --no-ext-diff
-                                git diff --quiet --no-ext-diff --cached""" % path,
-                                shell=True,
-                                stderr=subprocess.STDOUT)
-        return ""
-    except subprocess.CalledProcessError as ex:
-        # Silently treat errors as "modified", without checking for the
-        # (expected) return code 1 in a modified git repo. For example, we get
-        # output and a 129 return code when a layer isn't a git repo at all.
-        return "-- modified"
-
 # Returns layer revisions along with their respective status
 def get_layer_revs(d):
-    layers = (d.getVar("BBLAYERS") or "").split()
-    medadata_revs = ["%-17s = %s:%s %s" % (os.path.basename(i), \
-        base_get_metadata_git_branch(i, None).strip(), \
-        base_get_metadata_git_revision(i, None), \
-        get_layer_git_status(i)) \
-            for i in layers]
+    revisions = oe.buildcfg.get_layer_revisions(d)
+    medadata_revs = ["%-17s = %s:%s%s" % (r[1], r[2], r[3], r[4]) for r in revisions]
     return '\n'.join(medadata_revs)
 
 def buildinfo_target(d):
@@ -60,11 +41,12 @@ def buildinfo_target(d):
     vars = (d.getVar("IMAGE_BUILDINFO_VARS") or "")
     return image_buildinfo_outputvars(vars, d)
 
-# Write build information to target filesystem
-python buildinfo () {
+python buildinfo() {
     if not d.getVar('IMAGE_BUILDINFO_FILE'):
         return
-    with open(d.expand('${IMAGE_ROOTFS}${IMAGE_BUILDINFO_FILE}'), 'w') as build:
+    destfile = d.expand('${BUILDINFODEST}${IMAGE_BUILDINFO_FILE}')
+    bb.utils.mkdirhier(os.path.dirname(destfile))
+    with open(destfile, 'w') as build:
         build.writelines((
             '''-----------------------
 Build Configuration: |
@@ -82,4 +64,18 @@ Layer Revisions: |
         ))
 }
 
-IMAGE_PREPROCESS_COMMAND += "buildinfo;"
+# Write build information to target filesystem
+python buildinfo_image () {
+    d.setVar("BUILDINFODEST", "${IMAGE_ROOTFS}")
+    bb.build.exec_func("buildinfo", d)
+}
+
+python buildinfo_sdk () {
+    d.setVar("BUILDINFODEST", "${SDK_OUTPUT}/${SDKPATH}")
+    d.setVar("IMAGE_BUILDINFO_FILE", d.getVar("SDK_BUILDINFO_FILE"))
+    bb.build.exec_func("buildinfo", d)
+}
+
+IMAGE_PREPROCESS_COMMAND += "buildinfo_image"
+POPULATE_SDK_PRE_TARGET_COMMAND += "buildinfo_sdk"
81
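For context on what the reworked class produces, the following is a minimal stand-alone sketch (plain Python, not the bbclass itself; the function names, paths and variable values are illustrative) of the "NAME = value" report that the shared buildinfo() function now writes to ${BUILDINFODEST}${IMAGE_BUILDINFO_FILE}, i.e. /etc/buildinfo in the image rootfs or /buildinfo under the SDK output:

import os

def format_buildinfo(variables, values):
    # Mimic image_buildinfo_outputvars(): one "NAME = value" line per variable.
    return "\n".join("%s = %s" % (name, values.get(name, "")) for name in variables.split())

def write_buildinfo(destdir, relpath, variables, values):
    # Mimic buildinfo(): create the parent directory, then write the report.
    destfile = os.path.join(destdir, relpath.lstrip("/"))
    os.makedirs(os.path.dirname(destfile), exist_ok=True)
    with open(destfile, "w") as build:
        build.write("-----------------------\n")
        build.write("Build Configuration: |\n")
        build.write("-----------------------\n")
        build.write(format_buildinfo(variables, values) + "\n")
    return destfile

if __name__ == "__main__":
    # Example values; in the class these come from the datastore (d.getVar).
    print(write_buildinfo("/tmp/rootfs", "etc/buildinfo",
                          "DISTRO DISTRO_VERSION",
                          {"DISTRO": "poky", "DISTRO_VERSION": "3.4"}))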
diff --git a/meta/classes/image-combined-dbg.bbclass b/meta/classes/image-combined-dbg.bbclass
deleted file mode 100644
index f4772f7ea1..0000000000
--- a/meta/classes/image-combined-dbg.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
1IMAGE_PREPROCESS_COMMAND_append = " combine_dbg_image; "
2
3combine_dbg_image () {
4 if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
5 # copy target files into -dbg rootfs, so it can be used for
6 # debug purposes directly
7 tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf -
8 fi
9}
diff --git a/meta/classes/image-container.bbclass b/meta/classes/image-container.bbclass
deleted file mode 100644
index f002858bd2..0000000000
--- a/meta/classes/image-container.bbclass
+++ /dev/null
@@ -1,21 +0,0 @@
1ROOTFS_BOOTSTRAP_INSTALL = ""
2IMAGE_TYPES_MASKED += "container"
3IMAGE_TYPEDEP_container = "tar.bz2"
4
5python __anonymous() {
6 if "container" in d.getVar("IMAGE_FSTYPES") and \
7 d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
8 "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"):
9 msg = '"container" is in IMAGE_FSTYPES, but ' \
10 'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \
11 'Unless a particular kernel is needed, using linux-dummy will ' \
12 'prevent a kernel from being built, which can reduce ' \
13 'build times. If you don\'t want to use "linux-dummy", set ' \
14 '"IMAGE_CONTAINER_NO_DUMMY" to "1".'
15
16 # Raising skip recipe was Paul's clever idea. It causes the error to
17 # only be shown for the recipes actually requested to build, rather
18 # than bb.fatal which would appear for all recipes inheriting the
19 # class.
20 raise bb.parse.SkipRecipe(msg)
21}
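The anonymous function above reduces to a three-way condition; a tiny stand-alone restatement (plain Python with hypothetical argument names, not the class itself) for reference:

def container_needs_skip(image_fstypes, kernel_provider, no_dummy):
    # Skip the recipe when "container" is requested, the IMAGE_CONTAINER_NO_DUMMY
    # escape hatch is not set, and a real kernel provider is configured.
    return ("container" in image_fstypes.split()
            and no_dummy != "1"
            and "linux-dummy" not in kernel_provider)

assert container_needs_skip("container tar.bz2", "linux-yocto", "") is True
assert container_needs_skip("container tar.bz2", "linux-dummy", "") is False
assert container_needs_skip("container tar.bz2", "linux-yocto", "1") is False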
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
deleted file mode 100644
index 1b2183eadd..0000000000
--- a/meta/classes/image-live.bbclass
+++ /dev/null
@@ -1,264 +0,0 @@
1# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
2# Released under the MIT license (see packages/COPYING)
3
4# Creates a bootable image using syslinux, your kernel and an optional
5# initrd
6
7#
8# End result is two things:
9#
10# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
11# an initrd and a rootfs image. These can be written to harddisks directly and
12# also booted on USB flash disks (write them there with dd).
13#
14# 2. A CD .iso image
15
16# The boot process is: the initrd boots and determines which label was selected
17# in syslinux, then performs the corresponding actions (e.g. installing to
18# a hard disk)
19
20# External variables (also used by syslinux.bbclass)
21# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
22# ${HDDIMG_ID} - FAT image volume-id
23# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
24
25inherit live-vm-common image-artifact-names
26
27do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
28 mtools-native:do_populate_sysroot \
29 cdrtools-native:do_populate_sysroot \
30 virtual/kernel:do_deploy \
31 ${MLPREFIX}syslinux:do_populate_sysroot \
32 syslinux-native:do_populate_sysroot \
33 ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
34 "
35
36
37LABELS_LIVE ?= "boot install"
38ROOT_LIVE ?= "root=/dev/ram0"
39INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
40INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
41
42LIVE_ROOTFS_TYPE ?= "ext4"
43ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
44
45IMAGE_TYPEDEP_live = "${LIVE_ROOTFS_TYPE}"
46IMAGE_TYPEDEP_iso = "${LIVE_ROOTFS_TYPE}"
47IMAGE_TYPEDEP_hddimg = "${LIVE_ROOTFS_TYPE}"
48IMAGE_TYPES_MASKED += "live hddimg iso"
49
50python() {
51 image_b = d.getVar('IMAGE_BASENAME')
52 initrd_i = d.getVar('INITRD_IMAGE_LIVE')
53 if image_b == initrd_i:
54 bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
55 bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
56 elif initrd_i:
57 d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i)
58}
59
60HDDDIR = "${S}/hddimg"
61ISODIR = "${S}/iso"
62EFIIMGDIR = "${S}/efi_img"
63COMPACT_ISODIR = "${S}/iso.z"
64
65ISOLINUXDIR ?= "/isolinux"
66ISO_BOOTIMG = "isolinux/isolinux.bin"
67ISO_BOOTCAT = "isolinux/boot.cat"
68MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
69
70BOOTIMG_VOLUME_ID ?= "boot"
71BOOTIMG_EXTRA_SPACE ?= "512"
72
73populate_live() {
74 populate_kernel $1
75 if [ -s "${ROOTFS}" ]; then
76 install -m 0644 ${ROOTFS} $1/rootfs.img
77 fi
78}
79
80build_iso() {
81 # Only create an ISO if we have an INITRD and the live or iso image type was selected
82 if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then
83 bbnote "ISO image will not be created."
84 return
85 fi
86 # ${INITRD} is a list of multiple filesystem images
87 for fs in ${INITRD}
88 do
89 if [ ! -s "$fs" ]; then
90 bbwarn "ISO image will not be created. $fs is invalid."
91 return
92 fi
93 done
94
95 populate_live ${ISODIR}
96
97 if [ "${PCBIOS}" = "1" ]; then
98 syslinux_iso_populate ${ISODIR}
99 fi
100 if [ "${EFI}" = "1" ]; then
101 efi_iso_populate ${ISODIR}
102 build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
103 fi
104
105 # EFI only
106 if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
107 # Work around bug in isohybrid where it requires isolinux.bin
108 # In the boot catalog, even though it is not used
109 mkdir -p ${ISODIR}/${ISOLINUXDIR}
110 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
111 fi
112
113 # We used to have support for zisofs; this is a relic of that
114 mkisofs_compress_opts="-r"
115
116 # Check the size of ${ISODIR}/rootfs.img, use mkisofs -iso-level 3
117    # when it exceeds 3.8GB; the specification is 4G - 1 bytes, so we need
118    # to leave some space for other files.
119 mkisofs_iso_level=""
120
121 if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
122 rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
123 # 4080218931 = 3.8 * 1024 * 1024 * 1024
124 if [ $rootfs_img_size -gt 4080218931 ]; then
125            bbnote "${ISODIR}/rootfs.img exceeds 3.8GB, using '-iso-level 3' for mkisofs"
126 mkisofs_iso_level="-iso-level 3"
127 fi
128 fi
129
130 if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
131 # PCBIOS only media
132 mkisofs -V ${BOOTIMG_VOLUME_ID} \
133 -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
134 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
135 $mkisofs_compress_opts \
136 ${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
137 else
138 # EFI only OR EFI+PCBIOS
139 mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
140 -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
141 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
142 $mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
143 -eltorito-alt-boot -eltorito-platform efi \
144 -b efi.img -no-emul-boot \
145 ${ISODIR}
146 isohybrid_args="-u"
147 fi
148
149 isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
150}
151
152build_fat_img() {
153 FATSOURCEDIR=$1
154 FATIMG=$2
155
156 # Calculate the size required for the final image including the
157 # data and filesystem overhead.
158 # Sectors: 512 bytes
159 # Blocks: 1024 bytes
160
161 # Determine the sector count just for the data
162 SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
163
164 # Account for the filesystem overhead. This includes directory
165 # entries in the clusters as well as the FAT itself.
166 # Assumptions:
167 # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
168 # padding will be minimal on those smaller images and not
169    #    worth the logic here to calculate the smaller FAT sizes)
170 # < 16 entries per directory
171 # 8.3 filenames only
172
173 # 32 bytes per dir entry
174 DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
175 # 32 bytes for every end-of-directory dir entry
176 DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
177 # 4 bytes per FAT entry per sector of data
178 FAT_BYTES=$(expr $SECTORS \* 4)
179 # 4 bytes per FAT entry per end-of-cluster list
180 FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
181
182 # Use a ceiling function to determine FS overhead in sectors
183 DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
184 # There are two FATs on the image
185 FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
186 SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
187
188 # Determine the final size in blocks accounting for some padding
189 BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
190
191 # mkdosfs will sometimes use FAT16 when it is not appropriate,
192 # resulting in a boot failure from SYSLINUX. Use FAT32 for
193 # images larger than 512MB, otherwise let mkdosfs decide.
194 if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
195 FATSIZE="-F 32"
196 fi
197
198    # mkdosfs will fail if ${FATIMG} exists. Since we are creating a
199 # new image, it is safe to delete any previous image.
200 if [ -e ${FATIMG} ]; then
201 rm ${FATIMG}
202 fi
203
204 if [ -z "${HDDIMG_ID}" ]; then
205 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
206 ${BLOCKS}
207 else
208 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
209 ${BLOCKS} -i ${HDDIMG_ID}
210 fi
211
212 # Copy FATSOURCEDIR recursively into the image file directly
213 mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
214}
215
216build_hddimg() {
217 # Create an HDD image
218 if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then
219 populate_live ${HDDDIR}
220
221 if [ "${PCBIOS}" = "1" ]; then
222 syslinux_hddimg_populate ${HDDDIR}
223 fi
224 if [ "${EFI}" = "1" ]; then
225 efi_hddimg_populate ${HDDDIR}
226 fi
227
228 # Check the size of ${HDDDIR}/rootfs.img, error out if it
229 # exceeds 4GB, it is the single file's max size of FAT fs.
230 if [ -f ${HDDDIR}/rootfs.img ]; then
231 rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
232 max_size=`expr 4 \* 1024 \* 1024 \* 1024`
233 if [ $rootfs_img_size -ge $max_size ]; then
234            bberror "${HDDDIR}/rootfs.img rootfs size is greater than or equal to 4GB,"
235 bberror "and this doesn't work on a FAT filesystem. You can either:"
236 bberror "1) Reduce the size of rootfs.img, or,"
237 bbfatal "2) Use wic, vmdk,vhd, vhdx or vdi instead of hddimg\n"
238 fi
239 fi
240
241 build_fat_img ${HDDDIR} ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
242
243 if [ "${PCBIOS}" = "1" ]; then
244 syslinux_hddimg_install
245 fi
246
247 chmod 644 ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
248 fi
249}
250
251python do_bootimg() {
252 set_live_vm_vars(d, 'LIVE')
253 if d.getVar("PCBIOS") == "1":
254 bb.build.exec_func('build_syslinux_cfg', d)
255 if d.getVar("EFI") == "1":
256 bb.build.exec_func('build_efi_cfg', d)
257 bb.build.exec_func('build_hddimg', d)
258 bb.build.exec_func('build_iso', d)
259 bb.build.exec_func('create_symlinks', d)
260}
261do_bootimg[subimages] = "hddimg iso"
262do_bootimg[imgsuffix] = "."
263
264addtask bootimg before do_image_complete
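The sector arithmetic in build_fat_img above is easier to follow outside shell `expr` syntax; here is a rough Python transcription (illustrative only, inputs are hypothetical) of the same estimate: data sectors plus directory-entry and FAT overhead, rounded up, plus BOOTIMG_EXTRA_SPACE (512 blocks by default), expressed in 1 KiB blocks.

def estimate_fat_blocks(data_kib, n_entries, n_dirs, extra_space_blocks=512):
    sectors = data_kib * 2                        # 512-byte sectors for the payload
    dir_bytes = n_entries * 32                    # 32 bytes per directory entry
    dir_bytes += n_dirs * 32                      # plus an end-of-directory entry per dir
    fat_bytes = sectors * 4                       # 4 bytes of FAT per data sector
    fat_bytes += n_dirs * 4                       # plus one FAT entry per end-of-cluster list
    dir_sectors = (dir_bytes + 511) // 512        # ceiling division to sectors
    fat_sectors = ((fat_bytes + 511) // 512) * 2  # two copies of the FAT on the image
    total_sectors = sectors + dir_sectors + fat_sectors
    return total_sectors // 2 + extra_space_blocks  # 2 sectors per 1 KiB block

# e.g. a 100 MiB payload holding 2000 entries in 50 directories:
print(estimate_fat_blocks(102400, 2000, 50))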
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
deleted file mode 100644
index 68e11d4365..0000000000
--- a/meta/classes/image-mklibs.bbclass
+++ /dev/null
@@ -1,56 +0,0 @@
1do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
4
5inherit linuxloader
6
7mklibs_optimize_image_doit() {
8 rm -rf ${WORKDIR}/mklibs
9 mkdir -p ${WORKDIR}/mklibs/dest
10 cd ${IMAGE_ROOTFS}
11 du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
12
13 # Build a list of dynamically linked executable ELF files.
14 # Omit libc/libpthread as a special case because it has an interpreter
15 # but is primarily what we intend to strip down.
16 for i in `find . -type f -executable ! -name 'libc-*' ! -name 'libpthread-*'`; do
17 file $i | grep -q ELF || continue
18 ${HOST_PREFIX}readelf -l $i | grep -q INTERP || continue
19 echo $i
20 done > ${WORKDIR}/mklibs/executables.list
21
22 dynamic_loader=${@get_linuxloader(d)}
23
24 mklibs -v \
25 --ldlib ${dynamic_loader} \
26 --libdir ${baselib} \
27 --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
28 --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \
29 --root ${IMAGE_ROOTFS} \
30 --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
31 -d ${WORKDIR}/mklibs/dest \
32 `cat ${WORKDIR}/mklibs/executables.list`
33
34 cd ${WORKDIR}/mklibs/dest
35 for i in *
36 do
37 cp $i `find ${IMAGE_ROOTFS} -name $i`
38 done
39
40 cd ${IMAGE_ROOTFS}
41 du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
42
43 echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
44 echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
45}
46
47mklibs_optimize_image() {
48 for img in ${MKLIBS_OPTIMIZED_IMAGES}
49 do
50 if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
51 then
52 mklibs_optimize_image_doit
53 break
54 fi
55 done
56}
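The executables list that mklibs_optimize_image_doit feeds to mklibs is built by shelling out to find, file and readelf; a plain-Python equivalent of that selection (illustrative only, assuming file and readelf are on PATH) keeps dynamically linked ELF executables, i.e. files that "file" reports as ELF and whose program headers contain an INTERP entry:

import os, subprocess

def list_dynamic_executables(rootfs, readelf="readelf"):
    result = []
    for dirpath, _, filenames in os.walk(rootfs):
        for name in filenames:
            if name.startswith(("libc-", "libpthread-")):
                continue                      # special-cased by the class
            path = os.path.join(dirpath, name)
            if os.path.islink(path) or not os.access(path, os.X_OK):
                continue                      # regular, executable files only
            if b"ELF" not in subprocess.run(["file", path], capture_output=True).stdout:
                continue
            headers = subprocess.run([readelf, "-l", path], capture_output=True).stdout
            if b"INTERP" in headers:
                result.append(path)
    return result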
diff --git a/meta/classes/image-postinst-intercepts.bbclass b/meta/classes/image-postinst-intercepts.bbclass
deleted file mode 100644
index ed30bbd98d..0000000000
--- a/meta/classes/image-postinst-intercepts.bbclass
+++ /dev/null
@@ -1,23 +0,0 @@
1# Gather existing and candidate postinst intercepts from BBPATH
2POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts"
3POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}"
4
5python find_intercepts() {
6 intercepts = {}
7 search_paths = []
8 paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':')
9 overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + ['']
10 search_paths = [os.path.join(p, op) for p in paths for op in overrides]
11 searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True)
12 files, chksums = [], []
13 for pathname, candidates in searched:
14 if os.path.isfile(pathname):
15 files.append(pathname)
16 chksums.append('%s:True' % pathname)
17 chksums.extend('%s:False' % c for c in candidates[:-1])
18
19 d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums))
20 d.setVar('POSTINST_INTERCEPTS', ' '.join(files))
21}
22find_intercepts[eventmask] += "bb.event.RecipePreFinalise"
23addhandler find_intercepts
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
deleted file mode 100644
index ebf6e6d7ee..0000000000
--- a/meta/classes/image-prelink.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
1do_rootfs[depends] += "prelink-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND_append_libc-glibc = " prelink_setup; prelink_image; "
4
5python prelink_setup () {
6 oe.utils.write_ld_so_conf(d)
7}
8
9inherit linuxloader
10
11prelink_image () {
12# export PSEUDO_DEBUG=4
13# /bin/env | /bin/grep PSEUDO
14# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
15# echo "LD_PRELOAD=$LD_PRELOAD"
16
17 pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
18 echo "Size before prelinking $pre_prelink_size."
19
20 # The filesystem may not contain sysconfdir so establish what is present
21 # to enable cleanup after temporary creation of sysconfdir if needed
22 presentdir="${IMAGE_ROOTFS}${sysconfdir}"
23 while [ "${IMAGE_ROOTFS}" != "${presentdir}" ] ; do
24 [ ! -d "${presentdir}" ] || break
25 presentdir=`dirname "${presentdir}"`
26 done
27
28 mkdir -p "${IMAGE_ROOTFS}${sysconfdir}"
29
30 # We need a prelink conf on the filesystem, add one if it's missing
31 if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
32 cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
33 ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
34 dummy_prelink_conf=true;
35 else
36 dummy_prelink_conf=false;
37 fi
38
39 # We need a ld.so.conf with pathnames in,prelink conf on the filesystem, add one if it's missing
40 ldsoconf=${IMAGE_ROOTFS}${sysconfdir}/ld.so.conf
41 if [ -e $ldsoconf ]; then
42 cp $ldsoconf $ldsoconf.prelink
43 fi
44 cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
45
46 dynamic_loader=${@get_linuxloader(d)}
47
48 # prelink!
49 if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
50 bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
51 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
52 export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
53 else
54 export PRELINK_TIMESTAMP=$REPRODUCIBLE_TIMESTAMP_ROOTFS
55 fi
56 ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -am -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
57 else
58 ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
59 fi
60
61 # Remove the prelink.conf if we had to add it.
62 if [ "$dummy_prelink_conf" = "true" ]; then
63 rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
64 fi
65
66 if [ -e $ldsoconf.prelink ]; then
67 mv $ldsoconf.prelink $ldsoconf
68 else
69 rm $ldsoconf
70 fi
71
72 # Remove any directories temporarily created for sysconfdir
73 cleanupdir="${IMAGE_ROOTFS}${sysconfdir}"
74 while [ "${presentdir}" != "${cleanupdir}" ] ; do
75 rmdir "${cleanupdir}"
76 cleanupdir=`dirname ${cleanupdir}`
77 done
78
79 pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
80 echo "Size after prelinking $pre_prelink_size."
81}
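For reference, the PRELINK_TIMESTAMP selection above when BUILD_REPRODUCIBLE_BINARIES is "1" amounts to the following restatement (plain Python; the -C argument is added here for clarity, the class itself runs git in the current directory):

import subprocess

def choose_prelink_timestamp(rootfs_timestamp, metadata_dir="."):
    if rootfs_timestamp:
        return rootfs_timestamp                  # REPRODUCIBLE_TIMESTAMP_ROOTFS wins
    out = subprocess.run(["git", "-C", metadata_dir, "log", "-1", "--pretty=%ct"],
                         capture_output=True, text=True)
    return out.stdout.strip()                    # fall back to the last commit time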
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
deleted file mode 100644
index 41fc329178..0000000000
--- a/meta/classes/image.bbclass
+++ /dev/null
@@ -1,674 +0,0 @@
1
2IMAGE_CLASSES ??= ""
3
4# rootfs bootstrap install
5# warning - image-container resets this
6ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
7
8# Handle inherits of any of the image classes we need
9IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
10# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base
11# in the non-Linux SDK_OS case, such as mingw32
12IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
13IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
14IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
15IMGCLASSES += "image_types_wic"
16IMGCLASSES += "rootfs-postcommands"
17IMGCLASSES += "image-postinst-intercepts"
18inherit ${IMGCLASSES}
19
20TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
21TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
22POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
23
24LICENSE ?= "MIT"
25PACKAGES = ""
26DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
27RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
28RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
29PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
30
31INHIBIT_DEFAULT_DEPS = "1"
32
33# IMAGE_FEATURES may contain any available package group
34IMAGE_FEATURES ?= ""
35IMAGE_FEATURES[type] = "list"
36IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
37
38# Generate companion debugfs?
39IMAGE_GEN_DEBUGFS ?= "0"
40
41# These packages will be installed additionally into the debug rootfs
42IMAGE_INSTALL_DEBUGFS ?= ""
43
44# These packages will be removed from a read-only rootfs after all other
45# packages have been installed
46ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
47
48# packages to install from features
49FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
50FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
51FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
52FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
53
54# Define some very basic feature package groups
55FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
56SPLASH ?= "psplash"
57FEATURE_PACKAGES_splash = "${SPLASH}"
58
59IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
60
61def check_image_features(d):
62 valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
63 valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
64 for var in d:
65 if var.startswith("FEATURE_PACKAGES_"):
66 valid_features.append(var[17:])
67 valid_features.sort()
68
69 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
70 for feature in features:
71 if feature not in valid_features:
72 if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
73 raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
74 else:
75 raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
76
77IMAGE_INSTALL ?= ""
78IMAGE_INSTALL[type] = "list"
79export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
80PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
81
82IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"
83
84# Images are generally built explicitly, do not need to be part of world.
85EXCLUDE_FROM_WORLD = "1"
86
87USE_DEVFS ?= "1"
88USE_DEPMOD ?= "1"
89
90PID = "${@os.getpid()}"
91
92PACKAGE_ARCH = "${MACHINE_ARCH}"
93
94LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
95LDCONFIGDEPEND_libc-musl = ""
96
97# This is needed to have depmod data in PKGDATA_DIR,
98# but if you're building a small initramfs image,
99# e.g. to include it in your kernel, you probably
100# don't want this dependency, as it causes a dependency loop
101KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata"
102
103do_rootfs[depends] += " \
104 makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
105 virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
106 ${KERNELDEPMODDEPEND} \
107"
108do_rootfs[recrdeptask] += "do_packagedata"
109
110def rootfs_command_variables(d):
111 return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
112 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
113
114python () {
115 variables = rootfs_command_variables(d) + sdk_command_variables(d)
116 for var in variables:
117 if d.getVar(var, False):
118 d.setVarFlag(var, 'func', '1')
119}
120
121def rootfs_variables(d):
122 from oe.rootfs import variable_depends
123 variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
124 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
125 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
126 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
127 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
128 variables.extend(rootfs_command_variables(d))
129 variables.extend(variable_depends(d))
130 return " ".join(variables)
131
132do_rootfs[vardeps] += "${@rootfs_variables(d)}"
133
134do_build[depends] += "virtual/kernel:do_deploy"
135
136
137python () {
138 def extraimage_getdepends(task):
139 deps = ""
140 for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
141 deps += " %s:%s" % (dep, task)
142 return deps
143
144 d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
145
146 deps = " " + imagetypes_getdepends(d)
147 d.appendVarFlag('do_rootfs', 'depends', deps)
148
149 #process IMAGE_FEATURES, we must do this before runtime_mapping_rename
150 #Check for replaces image features
151 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
152 remain_features = features.copy()
153 for feature in features:
154 replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
155 remain_features -= replaces
156
157 #Check for conflict image features
158 for feature in remain_features:
159 conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
160 temp = conflicts & remain_features
161 if temp:
162 bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))
163
164 d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))
165
166 check_image_features(d)
167}
168
169IMAGE_POSTPROCESS_COMMAND ?= ""
170
171# some default locales
172IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
173
174LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
175
176# per default create a locale archive
177IMAGE_LOCALES_ARCHIVE ?= '1'
178
179# Prefer image, but use the fallback files for lookups if the image ones
180# aren't yet available.
181PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
182
183PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
184
185PACKAGE_EXCLUDE ??= ""
186PACKAGE_EXCLUDE[type] = "list"
187
188fakeroot python do_rootfs () {
189 from oe.rootfs import create_rootfs
190 from oe.manifest import create_manifest
191 import logging
192
193 logger = d.getVar('BB_TASK_LOGGER', False)
194 if logger:
195 logcatcher = bb.utils.LogCatcher()
196 logger.addHandler(logcatcher)
197 else:
198 logcatcher = None
199
200 # NOTE: if you add, remove or significantly refactor the stages of this
201 # process then you should recalculate the weightings here. This is quite
202 # easy to do - just change the MultiStageProgressReporter line temporarily
203 # to pass debug=True as the last parameter and you'll get a printout of
204 # the weightings as well as a map to the lines where next_stage() was
205 # called. Of course this isn't critical, but it helps to keep the progress
206 # reporting accurate.
207 stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1]
208 progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights)
209 progress_reporter.next_stage()
210
211 # Handle package exclusions
212 excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
213 inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
214 inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()
215
216 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
217 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
218
219 for pkg in excl_pkgs:
220 if pkg in inst_pkgs:
221 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
222 inst_pkgs.remove(pkg)
223
224 if pkg in inst_attempt_pkgs:
225 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
226 inst_attempt_pkgs.remove(pkg)
227
228 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
229 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
230
231 # Ensure we handle package name remapping
232 # We have to delay the runtime_mapping_rename until just before rootfs runs
233 # otherwise, the multilib renaming could step in and squash any fixups that
234 # may have occurred.
235 pn = d.getVar('PN')
236 runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
237 runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
238 runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
239
240 # Generate the initial manifest
241 create_manifest(d)
242
243 progress_reporter.next_stage()
244
245 # generate rootfs
246 d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
247 create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)
248
249 progress_reporter.finish()
250}
251do_rootfs[dirs] = "${TOPDIR}"
252do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
253do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
254addtask rootfs after do_prepare_recipe_sysroot
255
256fakeroot python do_image () {
257 from oe.utils import execute_pre_post_process
258
259 d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
260 pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")
261
262 execute_pre_post_process(d, pre_process_cmds)
263}
264do_image[dirs] = "${TOPDIR}"
265addtask do_image after do_rootfs
266
267fakeroot python do_image_complete () {
268 from oe.utils import execute_pre_post_process
269
270 post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")
271
272 execute_pre_post_process(d, post_process_cmds)
273}
274do_image_complete[dirs] = "${TOPDIR}"
275SSTATETASKS += "do_image_complete"
276SSTATE_SKIP_CREATION_task-image-complete = '1'
277do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
278do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
279do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
280addtask do_image_complete after do_image before do_build
281python do_image_complete_setscene () {
282 sstate_setscene(d)
283}
284addtask do_image_complete_setscene
285
286# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
287#
288# IMAGE_QA_COMMANDS += " \
289# image_check_everything_ok \
290# "
291# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
292# construction has completed in order to validate the resulting image.
293#
294# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
295# directory, which if QA passes will be the basis for the images.
296fakeroot python do_image_qa () {
297 from oe.utils import ImageQAFailed
298
299 qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
300 qamsg = ""
301
302 for cmd in qa_cmds:
303 try:
304 bb.build.exec_func(cmd, d)
305 except oe.utils.ImageQAFailed as e:
306 qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
307 except Exception as e:
308 qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
309
310 if qamsg:
311 imgname = d.getVar('IMAGE_NAME')
312 bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
313}
314addtask do_image_qa after do_rootfs before do_image
315
316SSTATETASKS += "do_image_qa"
317SSTATE_SKIP_CREATION_task-image-qa = '1'
318do_image_qa[sstate-inputdirs] = ""
319do_image_qa[sstate-outputdirs] = ""
320python do_image_qa_setscene () {
321 sstate_setscene(d)
322}
323addtask do_image_qa_setscene
324
325def setup_debugfs_variables(d):
326 d.appendVar('IMAGE_ROOTFS', '-dbg')
327 if d.getVar('IMAGE_LINK_NAME'):
328 d.appendVar('IMAGE_LINK_NAME', '-dbg')
329 d.appendVar('IMAGE_NAME','-dbg')
330 d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
331 debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
332 if debugfs_image_fstypes:
333 d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
334
335python setup_debugfs () {
336 setup_debugfs_variables(d)
337}
338
339python () {
340 vardeps = set()
341 # We allow CONVERSIONTYPES to have duplicates. That avoids breaking
342 # derived distros when OE-core or some other layer independently adds
343 # the same type. There is still only one command for each type, but
344 # presumably the commands will do the same when the type is the same,
345 # even when added in different places.
346 #
347 # Without de-duplication, gen_conversion_cmds() below
348 # would create the same compression command multiple times.
349 ctypes = set(d.getVar('CONVERSIONTYPES').split())
350 old_overrides = d.getVar('OVERRIDES', False)
351
352 def _image_base_type(type):
353 basetype = type
354 for ctype in ctypes:
355 if type.endswith("." + ctype):
356 basetype = type[:-len("." + ctype)]
357 break
358
359 if basetype != type:
360 # New base type itself might be generated by a conversion command.
361 basetype = _image_base_type(basetype)
362
363 return basetype
364
365 basetypes = {}
366 alltypes = d.getVar('IMAGE_FSTYPES').split()
367 typedeps = {}
368
369 if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
370 debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
371 for t in debugfs_fstypes:
372 alltypes.append("debugfs_" + t)
373
374 def _add_type(t):
375 baset = _image_base_type(t)
376 input_t = t
377 if baset not in basetypes:
378 basetypes[baset]= []
379 if t not in basetypes[baset]:
380 basetypes[baset].append(t)
381 debug = ""
382 if t.startswith("debugfs_"):
383 t = t[8:]
384 debug = "debugfs_"
385 deps = (d.getVar('IMAGE_TYPEDEP_' + t) or "").split()
386 vardeps.add('IMAGE_TYPEDEP_' + t)
387 if baset not in typedeps:
388 typedeps[baset] = set()
389 deps = [debug + dep for dep in deps]
390 for dep in deps:
391 if dep not in alltypes:
392 alltypes.append(dep)
393 _add_type(dep)
394 basedep = _image_base_type(dep)
395 typedeps[baset].add(basedep)
396
397 if baset != input_t:
398 _add_type(baset)
399
400 for t in alltypes[:]:
401 _add_type(t)
402
403 d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
404
405 maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
406 maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]
407
408 for t in basetypes:
409 vardeps = set()
410 cmds = []
411 subimages = []
412 realt = t
413
414 if t in maskedtypes:
415 continue
416
417 localdata = bb.data.createCopy(d)
418 debug = ""
419 if t.startswith("debugfs_"):
420 setup_debugfs_variables(localdata)
421 debug = "setup_debugfs "
422 realt = t[8:]
423 localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
424 localdata.setVar('type', realt)
425 # Delete DATETIME so we don't expand any references to it now
426 # This means the task's hash can be stable rather than having hardcoded
427 # date/time values. It will get expanded at execution time.
428        # Similarly TMPDIR since otherwise we see QA stamp comparison problems
429 # Expand PV else it can trigger get_srcrev which can fail due to these variables being unset
430 localdata.setVar('PV', d.getVar('PV'))
431 localdata.delVar('DATETIME')
432 localdata.delVar('DATE')
433 localdata.delVar('TMPDIR')
434 vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
435 for dep in vardepsexclude:
436 localdata.delVar(dep)
437
438 image_cmd = localdata.getVar("IMAGE_CMD")
439 vardeps.add('IMAGE_CMD_' + realt)
440 if image_cmd:
441 cmds.append("\t" + image_cmd)
442 else:
443 bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
444 cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
445
446 # Since a copy of IMAGE_CMD_xxx will be inlined within do_image_xxx,
447 # prevent a redundant copy of IMAGE_CMD_xxx being emitted as a function.
448 d.delVarFlag('IMAGE_CMD_' + realt, 'func')
449
450 rm_tmp_images = set()
451 def gen_conversion_cmds(bt):
452 for ctype in sorted(ctypes):
453 if bt.endswith("." + ctype):
454 type = bt[0:-len(ctype) - 1]
455 if type.startswith("debugfs_"):
456 type = type[8:]
457 # Create input image first.
458 gen_conversion_cmds(type)
459 localdata.setVar('type', type)
460 cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype) or localdata.getVar("COMPRESS_CMD_" + ctype))
461 if cmd not in cmds:
462 cmds.append(cmd)
463 vardeps.add('CONVERSION_CMD_' + ctype)
464 vardeps.add('COMPRESS_CMD_' + ctype)
465 subimage = type + "." + ctype
466 if subimage not in subimages:
467 subimages.append(subimage)
468 if type not in alltypes:
469 rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
470
471 for bt in basetypes[t]:
472 gen_conversion_cmds(bt)
473
474 localdata.setVar('type', realt)
475 if t not in alltypes:
476 rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
477 else:
478 subimages.append(realt)
479
480 # Clean up after applying all conversion commands. Some of them might
481 # use the same input, therefore we cannot delete sooner without applying
482 # some complex dependency analysis.
483 for image in sorted(rm_tmp_images):
484 cmds.append("\trm " + image)
485
486 after = 'do_image'
487 for dep in typedeps[t]:
488 after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
489
490 task = "do_image_%s" % t.replace("-", "_").replace(".", "_")
491
492 d.setVar(task, '\n'.join(cmds))
493 d.setVarFlag(task, 'func', '1')
494 d.setVarFlag(task, 'fakeroot', '1')
495
496 d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
497 d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
498 d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
499 d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
500 d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))
501
502 bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
503 bb.build.addtask(task, 'do_image_complete', after, d)
504}
505
506#
507# Compute the rootfs size
508#
509def get_rootfs_size(d):
510 import subprocess
511
512 rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
513 overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
514 rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
515 rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
516 rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
517 image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
518 initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
519 initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
520
521 output = subprocess.check_output(['du', '-ks',
522 d.getVar('IMAGE_ROOTFS')])
523 size_kb = int(output.split()[0])
524
525 base_size = size_kb * overhead_factor
526 bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
527 base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
528 bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
529
530 base_size = base_size2
531 if base_size != int(base_size):
532 base_size = int(base_size + 1)
533 else:
534 base_size = int(base_size)
535 bb.debug(1, '%f = int(%f)' % (base_size, base_size2))
536
537 base_size_saved = base_size
538 base_size += rootfs_alignment - 1
539 base_size -= base_size % rootfs_alignment
540 bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved))
541
542 # Do not check image size of the debugfs image. This is not supposed
543 # to be deployed, etc. so it doesn't make sense to limit the size
544 # of the debug.
545 if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
546 bb.debug(1, 'returning debugfs size %d' % (base_size))
547 return base_size
548
549 # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
550 if rootfs_maxsize:
551 rootfs_maxsize_int = int(rootfs_maxsize)
552 if base_size > rootfs_maxsize_int:
553 bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
554 (base_size, rootfs_maxsize_int))
555
556 # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
557 if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
558 initramfs_maxsize_int = int(initramfs_maxsize)
559 if base_size > initramfs_maxsize_int:
560 bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
561 (base_size, initramfs_maxsize_int))
562 bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
563 bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
564
565 bb.debug(1, 'returning %d' % (base_size))
566 return base_size
567
568python set_image_size () {
569 rootfs_size = get_rootfs_size(d)
570 d.setVar('ROOTFS_SIZE', str(rootfs_size))
571 d.setVarFlag('ROOTFS_SIZE', 'export', '1')
572}
573
574#
575# Create symlinks to the newly created image
576#
577python create_symlinks() {
578
579 deploy_dir = d.getVar('IMGDEPLOYDIR')
580 img_name = d.getVar('IMAGE_NAME')
581 link_name = d.getVar('IMAGE_LINK_NAME')
582 manifest_name = d.getVar('IMAGE_MANIFEST')
583 taskname = d.getVar("BB_CURRENTTASK")
584 subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
585 imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")
586
587 if not link_name:
588 return
589 for type in subimages:
590 dst = os.path.join(deploy_dir, link_name + "." + type)
591 src = img_name + imgsuffix + type
592 if os.path.exists(os.path.join(deploy_dir, src)):
593 bb.note("Creating symlink: %s -> %s" % (dst, src))
594 if os.path.islink(dst):
595 os.remove(dst)
596 os.symlink(src, dst)
597 else:
598 bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
599}
600
601MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
602MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
603MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
604
605do_fetch[noexec] = "1"
606do_unpack[noexec] = "1"
607do_patch[noexec] = "1"
608do_configure[noexec] = "1"
609do_compile[noexec] = "1"
610do_install[noexec] = "1"
611deltask do_populate_lic
612deltask do_populate_sysroot
613do_package[noexec] = "1"
614deltask do_package_qa
615do_packagedata[noexec] = "1"
616deltask do_package_write_ipk
617deltask do_package_write_deb
618deltask do_package_write_rpm
619
620# Prepare the root links to point to the /usr counterparts.
621create_merged_usr_symlinks() {
622 root="$1"
623 install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
624 lnr $root${base_bindir} $root/bin
625 lnr $root${base_sbindir} $root/sbin
626 lnr $root${base_libdir} $root/${baselib}
627
628 if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
629 install -d $root${nonarch_base_libdir}
630 lnr $root${nonarch_base_libdir} $root/lib
631 fi
632
633 # create base links for multilibs
634 multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
635 for d in $multi_libdirs; do
636 install -d $root${exec_prefix}/$d
637 lnr $root${exec_prefix}/$d $root/$d
638 done
639}
640
641create_merged_usr_symlinks_rootfs() {
642 create_merged_usr_symlinks ${IMAGE_ROOTFS}
643}
644
645create_merged_usr_symlinks_sdk() {
646 create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
647}
648
649ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
650POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
651
652reproducible_final_image_task () {
653 if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
654 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
655 REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
656 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
657 REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
658 fi
659 fi
660 # Set mtime of all files to a reproducible value
661 bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
662 find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
663 fi
664}
665
666systemd_preset_all () {
667 if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
668 systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
669 fi
670}
671
672IMAGE_PREPROCESS_COMMAND_append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
673
674CVE_PRODUCT = ""
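The rootfs sizing in get_rootfs_size() above is a short arithmetic chain; a stand-alone Python restatement (with illustrative values) of the same steps: scale the measured size by IMAGE_OVERHEAD_FACTOR, enforce IMAGE_ROOTFS_SIZE as a floor, add IMAGE_ROOTFS_EXTRA_SPACE, then round up to IMAGE_ROOTFS_ALIGNMENT.

import math

def rootfs_size_kib(measured_kib, overhead_factor, requested_kib, extra_kib, alignment_kib):
    size = max(measured_kib * overhead_factor, requested_kib) + extra_kib
    size = math.ceil(size)                  # whole KiB
    return size + (-size % alignment_kib)   # round up to the alignment

# e.g. 120000 KiB measured, 1.3 overhead, 65536 KiB floor, 0 extra, 4096 KiB alignment
print(rootfs_size_kib(120000, 1.3, 65536, 0, 4096))   # -> 159744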
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
deleted file mode 100644
index 8028691405..0000000000
--- a/meta/classes/image_types.bbclass
+++ /dev/null
@@ -1,324 +0,0 @@
1# The default alignment of the size of the rootfs is set to 1KiB. In case
2# you're using the SD card emulation of a QEMU system simulator you may
3# set this value to 2048 (2MiB alignment).
4IMAGE_ROOTFS_ALIGNMENT ?= "1"
5
6def imagetypes_getdepends(d):
7 def adddep(depstr, deps):
8 for d in (depstr or "").split():
9 # Add task dependency if not already present
10 if ":" not in d:
11 d += ":do_populate_sysroot"
12 deps.add(d)
13
14 # Take a type in the form of foo.bar.car and split it into the items
15 # needed for the image deps "foo", and the conversion deps ["bar", "car"]
16 def split_types(typestring):
17 types = typestring.split(".")
18 return types[0], types[1:]
19
20 fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
21 fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())
22
23 deprecated = set()
24 deps = set()
25 for typestring in fstypes:
26 basetype, resttypes = split_types(typestring)
27
28 var = "IMAGE_DEPENDS_%s" % basetype
29 if d.getVar(var) is not None:
30 deprecated.add(var)
31
32 for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
33 base, rest = split_types(typedepends)
34 resttypes += rest
35
36 var = "IMAGE_DEPENDS_%s" % base
37 if d.getVar(var) is not None:
38 deprecated.add(var)
39
40 for ctype in resttypes:
41 adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
42 adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)
43
44 if deprecated:
45 bb.fatal('Deprecated variable(s) found: "%s". '
46 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated))
47
48    # Sort the set so that ordering is consistent
49 return " ".join(sorted(deps))
50
51XZ_COMPRESSION_LEVEL ?= "-9"
52XZ_INTEGRITY_CHECK ?= "crc32"
53
54ZIP_COMPRESSION_LEVEL ?= "-9"
55
56ZSTD_COMPRESSION_LEVEL ?= "-3"
57
58JFFS2_SUM_EXTRA_ARGS ?= ""
59IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
60
61IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
62
63oe_mkext234fs () {
64 fstype=$1
65 extra_imagecmd=""
66
67 if [ $# -gt 1 ]; then
68 shift
69 extra_imagecmd=$@
70 fi
71
72 # If generating an empty image the size of the sparse block should be large
73 # enough to allocate an ext4 filesystem using 4096 bytes per inode, this is
74 # about 60K, so dd needs a minimum count of 60, with bs=1024 (bytes per IO)
75 eval local COUNT=\"0\"
76 eval local MIN_COUNT=\"60\"
77 if [ $ROOTFS_SIZE -lt $MIN_COUNT ]; then
78 eval COUNT=\"$MIN_COUNT\"
79 fi
80 # Create a sparse image block
81 bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
82 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
83 bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
84    bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
85 bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
86 mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
87    # Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
88 fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
89}
90
91IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
92IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
93IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
94
95MIN_BTRFS_SIZE ?= "16384"
96IMAGE_CMD_btrfs () {
97 size=${ROOTFS_SIZE}
98 if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
99 size=${MIN_BTRFS_SIZE}
100 bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
101 fi
102 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs seek=${size} count=0 bs=1024
103 mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
104}
105
106IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
107IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
108IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
109IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
110
111IMAGE_CMD_TAR ?= "tar"
112# ignore return code 1 "file changed as we read it" as other tasks (e.g. do_image_wic) may be hardlinking the rootfs
113IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
114
115do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
116IMAGE_CMD_cpio () {
117 (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
118 # We only need the /init symlink if we're building the real
119 # image. The -dbg image doesn't need it! By being clever
120 # about this we also avoid 'touch' below failing, as it
121 # might be trying to touch /sbin/init on the host since both
122 # the normal and the -dbg image share the same WORKDIR
123 if [ "${IMAGE_BUILDING_DEBUGFS}" != "true" ]; then
124 if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
125 if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
126 ln -sf /sbin/init ${WORKDIR}/cpio_append/init
127 else
128 touch ${WORKDIR}/cpio_append/init
129 fi
130 (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
131 fi
132 fi
133}
134
135UBI_VOLNAME ?= "${MACHINE}-rootfs"
136
137multiubi_mkfs() {
138 local mkubifs_args="$1"
139 local ubinize_args="$2"
140
141 # Added prompt error message for ubi and ubifs image creation.
142 if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
143 bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
144 fi
145
146 if [ -z "$3" ]; then
147 local vname=""
148 else
149 local vname="_$3"
150 fi
151
152 echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
153 echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
154 echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
155 echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
156 echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
157 echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
158 echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
159 if [ -n "$vname" ]; then
160 mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
161 fi
162 ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
163
164 # Cleanup cfg file
165 mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/
166
167 # Create own symlinks for 'named' volumes
168 if [ -n "$vname" ]; then
169 cd ${IMGDEPLOYDIR}
170 if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
171 ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
172 ${IMAGE_LINK_NAME}${vname}.ubifs
173 fi
174 if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ]; then
175 ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi \
176 ${IMAGE_LINK_NAME}${vname}.ubi
177 fi
178 cd -
179 fi
180}
181
182IMAGE_CMD_multiubi () {
183 # Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
184 for name in ${MULTIUBI_BUILD}; do
185 eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
186 eval local ubinize_args=\"\$UBINIZE_ARGS_${name}\"
187
188 multiubi_mkfs "${mkubifs_args}" "${ubinize_args}" "${name}"
189 done
190}
191
192IMAGE_CMD_ubi () {
193 multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
194}
195IMAGE_TYPEDEP_ubi = "ubifs"
196
197IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
198
199MIN_F2FS_SIZE ?= "524288"
200IMAGE_CMD_f2fs () {
201    # We need to add additional smarts here for devices smaller than 1.5G
202 # Need to scale appropriately between 40M -> 1.5G as the "overprovision
203 # ratio" goes down as the device gets bigger (70% -> 4.5%), below about
204 # 500M the standard IMAGE_OVERHEAD_FACTOR does not work, so add additional
205 # space here when under 500M
206 size=${ROOTFS_SIZE}
207 if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then
208 size=${MIN_F2FS_SIZE}
209 bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
210 fi
211 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs seek=${size} count=0 bs=1024
212 mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
213 sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
214}
215
216EXTRA_IMAGECMD = ""
217
218inherit siteinfo kernel-arch image-artifact-names
219
220JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
221JFFS2_ERASEBLOCK ?= "0x40000"
222EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
223
224# Change these if you want default mkfs behavior (i.e. create minimal inode number)
225EXTRA_IMAGECMD_ext2 ?= "-i 4096"
226EXTRA_IMAGECMD_ext3 ?= "-i 4096"
227EXTRA_IMAGECMD_ext4 ?= "-i 4096"
228EXTRA_IMAGECMD_btrfs ?= "-n 4096"
229EXTRA_IMAGECMD_f2fs ?= ""
230
231do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
232do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
233do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
234do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot"
235do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot"
236do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot"
237do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot"
238do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
239do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
240do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
241do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
242do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
243do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
244do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
245do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
246
247# This variable is available to request which values are suitable for IMAGE_FSTYPES
248IMAGE_TYPES = " \
249 jffs2 jffs2.sum \
250 cramfs \
251 ext2 ext2.gz ext2.bz2 ext2.lzma \
252 ext3 ext3.gz \
253 ext4 ext4.gz \
254 btrfs \
255 iso \
256 hddimg \
257 squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
258 ubi ubifs multiubi \
259 tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
260 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
261 wic wic.gz wic.bz2 wic.lzma wic.zst \
262 container \
263 f2fs \
264"
265
266# Compression is a special case of conversion. The old variable
267# names are still supported for backward-compatibility. When defining
268# new compression or conversion commands, use CONVERSIONTYPES and
269# CONVERSION_CMD/DEPENDS.
270COMPRESSIONTYPES ?= ""
271
272CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 ${COMPRESSIONTYPES}"
273CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
274CONVERSION_CMD_gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
275CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
276CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
277CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
278CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
279CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
280CONVERSION_CMD_zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
281CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
282CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
283CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
284CONVERSION_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
285CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
286CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
287CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
288CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
289CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
290CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
291CONVERSION_CMD_vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
292CONVERSION_CMD_vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
293CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
294CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
295CONVERSION_CMD_base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
296CONVERSION_DEPENDS_lzma = "xz-native"
297CONVERSION_DEPENDS_gz = "pigz-native"
298CONVERSION_DEPENDS_bz2 = "pbzip2-native"
299CONVERSION_DEPENDS_xz = "xz-native"
300CONVERSION_DEPENDS_lz4 = "lz4-native"
301CONVERSION_DEPENDS_lzo = "lzop-native"
302CONVERSION_DEPENDS_zip = "zip-native"
303CONVERSION_DEPENDS_zst = "zstd-native"
304CONVERSION_DEPENDS_sum = "mtd-utils-native"
305CONVERSION_DEPENDS_bmap = "bmap-tools-native"
306CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
307CONVERSION_DEPENDS_vmdk = "qemu-system-native"
308CONVERSION_DEPENDS_vdi = "qemu-system-native"
309CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
310CONVERSION_DEPENDS_base64 = "coreutils-native"
311CONVERSION_DEPENDS_vhdx = "qemu-system-native"
312CONVERSION_DEPENDS_vhd = "qemu-system-native"
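# Illustrative sketch only: a layer defining an additional conversion type
# would follow the same pattern as above. The type name, command and tool
# below are hypothetical examples, not definitions from this class:
#   CONVERSIONTYPES += "mynewtype"
#   CONVERSION_CMD_mynewtype = "mytool ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.mynewtype"
#   CONVERSION_DEPENDS_mynewtype = "mytool-native"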
313
314RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
315RUNNABLE_MACHINE_PATTERNS ?= "qemu"
316
317DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
318
319# The IMAGE_TYPES_MASKED variable is used to mask out, from IMAGE_FSTYPES,
320# image types that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
321IMAGE_TYPES_MASKED ?= ""
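# Illustrative only: classes that build such images in later tasks typically
# append the types they handle themselves, e.g. a live-image class might do:
#   IMAGE_TYPES_MASKED += "hddimg iso"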
322
323# bmap requires python3 to be in the PATH
324EXTRANATIVEPATH += "${@'python3-native' if '.bmap' in d.getVar('IMAGE_FSTYPES') else ''}"
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
deleted file mode 100644
index 49be1da77a..0000000000
--- a/meta/classes/image_types_wic.bbclass
+++ /dev/null
@@ -1,157 +0,0 @@
1# The WICVARS variable defines the list of bitbake variables used by the wic code;
2# variables from this list are written to the <image>.env file
3WICVARS ?= "\
4 BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_EFI_BOOT_FILES IMAGE_BOOT_FILES \
5 IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
6 ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS HOSTTOOLS_DIR \
7 KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND \
8 ASSUME_PROVIDED PSEUDO_IGNORE_PATHS"
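# Illustrative only: an image recipe whose .wks file needs an additional
# variable could extend the list, e.g. with a hypothetical variable:
#   WICVARS_append = " MY_EXTRA_WKS_VAR"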
9
10inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
11
12WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
13WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
14WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
15WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}"
16
17def wks_search(files, search_path):
18 for f in files:
19 if os.path.isabs(f):
20 if os.path.exists(f):
21 return f
22 else:
23 searched = bb.utils.which(search_path, f)
24 if searched:
25 return searched
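# Illustrative only: a machine configuration or image recipe normally just
# points WKS_FILE at a kickstart file found on WKS_SEARCH_PATH, e.g. a
# hypothetical:
#   WKS_FILE = "my-machine.wks"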
26
27WIC_CREATE_EXTRA_ARGS ?= ""
28
29IMAGE_CMD_wic () {
30 out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
31 build_wic="${WORKDIR}/build-wic"
32 tmp_wic="${WORKDIR}/tmp-wic"
33 wks="${WKS_FULL_PATH}"
34 if [ -e "$tmp_wic" ]; then
35 # Ensure we don't have any junk leftover from a previously interrupted
36 # do_image_wic execution
37 rm -rf "$tmp_wic"
38 fi
39 if [ -z "$wks" ]; then
40 bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
41 fi
42 BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
43 mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
44}
45IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
46do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
47
48PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
49
50# Rebuild when the wks file or vars in WICVARS change
51USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
52WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
53do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
54do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
55
56# We ensure all artifacts are deployed (e.g. virtual/bootloader)
57do_image_wic[recrdeptask] += "do_deploy"
58do_image_wic[deptask] += "do_image_complete"
59
60WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
61WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
62WKS_FILE_DEPENDS_BOOTLOADERS = ""
63WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
64WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
65WKS_FILE_DEPENDS_BOOTLOADERS_x86-x32 = "syslinux grub-efi"
66
67WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
68
69DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
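# Illustrative only: a BSP layer whose .wks file needs an extra bootloader or
# native tool could extend WKS_FILE_DEPENDS, e.g. with a hypothetical recipe:
#   WKS_FILE_DEPENDS_append = " my-bootloader"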
70
71python do_write_wks_template () {
72 """Write out expanded template contents to WKS_FULL_PATH."""
73 import re
74
75 template_body = d.getVar('_WKS_TEMPLATE')
76
77 # Remove any remnant variable references left behind by the expansion
78 # due to undefined variables
79 expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
80 while True:
81 new_body = re.sub(expand_var_regexp, '', template_body)
82 if new_body == template_body:
83 break
84 else:
85 template_body = new_body
86
87 wks_file = d.getVar('WKS_FULL_PATH')
88 with open(wks_file, 'w') as f:
89 f.write(template_body)
90 f.close()
91 # Copy the finalized wks file to the deploy directory for later use
92 depdir = d.getVar('IMGDEPLOYDIR')
93 basename = d.getVar('IMAGE_BASENAME')
94 bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
95}
96
97do_flush_pseudodb() {
98 ${FAKEROOTENV} ${FAKEROOTCMD} -S
99}
100
101python () {
102 if d.getVar('USING_WIC'):
103 wks_file_u = d.getVar('WKS_FULL_PATH', False)
104 wks_file = d.expand(wks_file_u)
105 base, ext = os.path.splitext(wks_file)
106 if ext == '.in' and os.path.exists(wks_file):
107 wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base))
108 d.setVar('WKS_FULL_PATH', wks_out_file)
109 d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
110 d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
111
112 # We need to re-parse each time the file changes, and bitbake
113 # needs to be told about that explicitly.
114 bb.parse.mark_dependency(d, wks_file)
115
116 try:
117 with open(wks_file, 'r') as f:
118 body = f.read()
119 except (IOError, OSError) as exc:
120 pass
121 else:
122 # Previously, I used expandWithRefs to get the dependency list
123 # and add it to WICVARS, but there's no point re-parsing the
124 # file in process_wks_template as well, so just put it in
125 # a variable and let the metadata deal with the deps.
126 d.setVar('_WKS_TEMPLATE', body)
127 bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
128 bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
129}
130
131#
132# Write environment variables used by wic
133# to tmp/sysroots/<machine>/imgdata/<image>.env
134#
135python do_rootfs_wicenv () {
136 wicvars = d.getVar('WICVARS')
137 if not wicvars:
138 return
139
140 stdir = d.getVar('STAGING_DIR')
141 outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata')
142 bb.utils.mkdirhier(outdir)
143 basename = d.getVar('IMAGE_BASENAME')
144 with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
145 for var in wicvars.split():
146 value = d.getVar(var)
147 if value:
148 envf.write('%s="%s"\n' % (var, value.strip()))
149 envf.close()
150    # Copy the .env file to the deploy directory for later use with standalone wic
151 depdir = d.getVar('IMGDEPLOYDIR')
152 bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
153}
154addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa
155addtask do_rootfs_wicenv after do_image before do_image_wic
156do_rootfs_wicenv[vardeps] += "${WICVARS}"
157do_rootfs_wicenv[prefuncs] = 'set_image_size'
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
deleted file mode 100644
index 53230fc667..0000000000
--- a/meta/classes/insane.bbclass
+++ /dev/null
@@ -1,1403 +0,0 @@
1# BB Class inspired by ebuild.sh
2#
3# This class will test files after installation for certain
4# security issues and other kinds of issues.
5#
6# Checks we do:
7# -Check the ownership and permissions
8# -Check the RUNTIME path for the $TMPDIR
9# -Check if .la files wrongly point to workdir
10# -Check if .pc files wrongly point to workdir
11# -Check if packages contain .debug directories or .so files
12# where they should be in -dev or -dbg
13# -Check if config.log contains traces of broken autoconf tests
14# -Check for invalid characters (non-UTF-8) in some package metadata
15# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
16# into exec_prefix
17# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
18# files under exec_prefix
19# -Check if the package name is upper case
20
21QA_SANE = "True"
22
23# Select whether a given type of issue is reported as a warning or an error;
24# these lists may already have been set by other files.
25WARN_QA ?= " libdir xorg-driver-abi \
26 textrel incompatible-license files-invalid \
27 infodir build-deps src-uri-bad symlink-to-sysroot multilib \
28 invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
29 mime mime-xdg unlisted-pkg-lics unhandled-features-check \
30 missing-update-alternatives native-last \
31 "
32ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
33 perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
34 split-strip packages-list pkgv-undefined var-undefined \
35 version-going-backwards expanded-d invalid-chars \
36 license-checksum dev-elf file-rdeps configure-unsafe \
37 configure-gettext perllocalpod shebang-size \
38 already-stripped installed-vs-shipped ldflags compile-host-path \
39 install-host-path pn-overrides unknown-configure-option \
40 useless-rpaths rpaths staticdev \
41 "
42# Add usrmerge QA check based on distro feature
43ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
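# Illustrative only: a distro configuration can move a check between the two
# lists above, and a recipe can opt out of individual checks via INSANE_SKIP,
# e.g. (hypothetical examples):
#   ERROR_QA_append = " textrel"
#   INSANE_SKIP_${PN} += "ldflags"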
44
45FAKEROOT_QA = "host-user-contaminated"
46FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
47enabled tests are listed here, the do_package_qa task will run under fakeroot."
48
49ALL_QA = "${WARN_QA} ${ERROR_QA}"
50
51UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
52
53def package_qa_clean_path(path, d, pkg=None):
54 """
55 Remove redundant paths from the path for display. If pkg isn't set then
56 TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
57 """
58 if pkg:
59 path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
60 return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
61
62def package_qa_write_error(type, error, d):
63 logfile = d.getVar('QA_LOGFILE')
64 if logfile:
65 p = d.getVar('P')
66 with open(logfile, "a+") as f:
67 f.write("%s: %s [%s]\n" % (p, error, type))
68
69def package_qa_handle_error(error_class, error_msg, d):
70 if error_class in (d.getVar("ERROR_QA") or "").split():
71 package_qa_write_error(error_class, error_msg, d)
72 bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
73 d.setVar("QA_SANE", False)
74 return False
75 elif error_class in (d.getVar("WARN_QA") or "").split():
76 package_qa_write_error(error_class, error_msg, d)
77 bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
78 else:
79 bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
80 return True
81
82def package_qa_add_message(messages, section, new_msg):
83 if section not in messages:
84 messages[section] = new_msg
85 else:
86 messages[section] = messages[section] + "\n" + new_msg
87
88QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
89def package_qa_check_shebang_size(path, name, d, elf, messages):
90 import stat
91 if os.path.islink(path) or stat.S_ISFIFO(os.stat(path).st_mode) or elf:
92 return
93
94 try:
95 with open(path, 'rb') as f:
96 stanza = f.readline(130)
97 except IOError:
98 return
99
100 if stanza.startswith(b'#!'):
101        # Shebang found, now check its length
102 try:
103 stanza = stanza.decode("utf-8")
104 except UnicodeDecodeError:
105 #If it is not a text file, it is not a script
106 return
107
108 if len(stanza) > 129:
109 package_qa_add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
110 return
111
112QAPATHTEST[libexec] = "package_qa_check_libexec"
113def package_qa_check_libexec(path,name, d, elf, messages):
114
115 # Skip the case where the default is explicitly /usr/libexec
116 libexec = d.getVar('libexecdir')
117 if libexec == "/usr/libexec":
118 return True
119
120 if 'libexec' in path.split(os.path.sep):
121 package_qa_add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
122 return False
123
124 return True
125
126QAPATHTEST[rpaths] = "package_qa_check_rpath"
127def package_qa_check_rpath(file,name, d, elf, messages):
128 """
129 Check for dangerous RPATHs
130 """
131 if not elf:
132 return
133
134 if os.path.islink(file):
135 return
136
137 bad_dirs = [d.getVar('BASE_WORKDIR'), d.getVar('STAGING_DIR_TARGET')]
138
139 phdrs = elf.run_objdump("-p", d)
140
141 import re
142 rpath_re = re.compile(r"\s+RPATH\s+(.*)")
143 for line in phdrs.split("\n"):
144 m = rpath_re.match(line)
145 if m:
146 rpath = m.group(1)
147 for dir in bad_dirs:
148 if dir in rpath:
149 package_qa_add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
150
151QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
152def package_qa_check_useless_rpaths(file, name, d, elf, messages):
153 """
154 Check for RPATHs that are useless but not dangerous
155 """
156 def rpath_eq(a, b):
157 return os.path.normpath(a) == os.path.normpath(b)
158
159 if not elf:
160 return
161
162 if os.path.islink(file):
163 return
164
165 libdir = d.getVar("libdir")
166 base_libdir = d.getVar("base_libdir")
167
168 phdrs = elf.run_objdump("-p", d)
169
170 import re
171 rpath_re = re.compile(r"\s+RPATH\s+(.*)")
172 for line in phdrs.split("\n"):
173 m = rpath_re.match(line)
174 if m:
175 rpath = m.group(1)
176 if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
177 # The dynamic linker searches both these places anyway. There is no point in
178 # looking there again.
179 package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
180
181QAPATHTEST[dev-so] = "package_qa_check_dev"
182def package_qa_check_dev(path, name, d, elf, messages):
183 """
184 Check for ".so" library symlinks in non-dev packages
185 """
186
187 if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
188 package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
189 (name, package_qa_clean_path(path,d)))
190
191QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
192def package_qa_check_dev_elf(path, name, d, elf, messages):
193 """
194 Check that -dev doesn't contain real shared libraries. The test has to
195 check that the file is not a link and is an ELF object as some recipes
196 install link-time .so files that are linker scripts.
197 """
198 if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
199 package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \
200 (name, package_qa_clean_path(path,d)))
201
202QAPATHTEST[staticdev] = "package_qa_check_staticdev"
203def package_qa_check_staticdev(path, name, d, elf, messages):
204 """
205 Check for ".a" library in non-staticdev packages
206    There are a number of exceptions to this rule: -pic packages can contain
207    static libraries, the _nonshared.a files belong with their -dev packages, and
208    libgcc.a and libgcov.a will be skipped in their packages
209 """
210
211 if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
212 package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
213 (name, package_qa_clean_path(path,d)))
214
215QAPATHTEST[mime] = "package_qa_check_mime"
216def package_qa_check_mime(path, name, d, elf, messages):
217 """
218 Check if package installs mime types to /usr/share/mime/packages
219    while not inheriting mime.bbclass
220 """
221
222 if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
223 package_qa_add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
224 (name, package_qa_clean_path(path,d)))
225
226QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
227def package_qa_check_mime_xdg(path, name, d, elf, messages):
228 """
229    Check if the package installs a desktop file containing a MimeType entry
230    without inheriting mime-xdg.bbclass, which creates /usr/share/applications/mimeinfo.cache
231 """
232
233 if d.getVar("datadir") + "/applications" in path and path.endswith('.desktop') and not bb.data.inherits_class("mime-xdg", d):
234 mime_type_found = False
235 try:
236 with open(path, 'r') as f:
237 for line in f.read().split('\n'):
238 if 'MimeType' in line:
239 mime_type_found = True
240 break;
241 except:
242 # At least libreoffice installs symlinks with absolute paths that are dangling here.
243            # We could implement some magic, but for the few (one) recipes affected it is not worth the effort, so just warn:
244 wstr = "%s cannot open %s - is it a symlink with absolute path?\n" % (name, package_qa_clean_path(path,d))
245 wstr += "Please check if (linked) file contains key 'MimeType'.\n"
246 pkgname = name
247 if name == d.getVar('PN'):
248 pkgname = '${PN}'
249            wstr += "If yes: add \'inherit mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP_%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
250 package_qa_add_message(messages, "mime-xdg", wstr)
251 if mime_type_found:
252        package_qa_add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inherit mime-xdg: %s path '%s'" % \
253 (name, package_qa_clean_path(path,d)))
254
255def package_qa_check_libdir(d):
256 """
257 Check for wrong library installation paths. For instance, catch
258 recipes installing /lib/bar.so when ${base_libdir}="lib32" or
259 installing in /usr/lib64 when ${libdir}="/usr/lib"
260 """
261 import re
262
263 pkgdest = d.getVar('PKGDEST')
264 base_libdir = d.getVar("base_libdir") + os.sep
265 libdir = d.getVar("libdir") + os.sep
266 libexecdir = d.getVar("libexecdir") + os.sep
267 exec_prefix = d.getVar("exec_prefix") + os.sep
268
269 messages = []
270
271    # The regexes are purposely fuzzy, as there are some .so.x.y.z files
272 # that don't follow the standard naming convention. It checks later
273 # that they are actual ELF files
274 lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
275 exec_re = re.compile(r"^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
276
277 for root, dirs, files in os.walk(pkgdest):
278 if root == pkgdest:
279 # Skip subdirectories for any packages with libdir in INSANE_SKIP
280 skippackages = []
281 for package in dirs:
282 if 'libdir' in (d.getVar('INSANE_SKIP_' + package) or "").split():
283 bb.note("Package %s skipping libdir QA test" % (package))
284 skippackages.append(package)
285 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
286 bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
287 skippackages.append(package)
288 for package in skippackages:
289 dirs.remove(package)
290 for file in files:
291 full_path = os.path.join(root, file)
292 rel_path = os.path.relpath(full_path, pkgdest)
293 if os.sep in rel_path:
294 package, rel_path = rel_path.split(os.sep, 1)
295 rel_path = os.sep + rel_path
296 if lib_re.match(rel_path):
297 if base_libdir not in rel_path:
298 # make sure it's an actual ELF file
299 elf = oe.qa.ELFFile(full_path)
300 try:
301 elf.open()
302 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
303 except (oe.qa.NotELFFileError):
304 pass
305 if exec_re.match(rel_path):
306 if libdir not in rel_path and libexecdir not in rel_path:
307 # make sure it's an actual ELF file
308 elf = oe.qa.ELFFile(full_path)
309 try:
310 elf.open()
311 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
312 except (oe.qa.NotELFFileError):
313 pass
314
315 if messages:
316 package_qa_handle_error("libdir", "\n".join(messages), d)
317
318QAPATHTEST[debug-files] = "package_qa_check_dbg"
319def package_qa_check_dbg(path, name, d, elf, messages):
320 """
321 Check for ".debug" files or directories outside of the dbg package
322 """
323
324 if not "-dbg" in name and not "-ptest" in name:
325 if '.debug' in path.split(os.path.sep):
326 package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
327 (name, package_qa_clean_path(path,d)))
328
329QAPATHTEST[arch] = "package_qa_check_arch"
330def package_qa_check_arch(path,name,d, elf, messages):
331 """
332 Check if archs are compatible
333 """
334 import re, oe.elf
335
336 if not elf:
337 return
338
339 target_os = d.getVar('TARGET_OS')
340 target_arch = d.getVar('TARGET_ARCH')
341 provides = d.getVar('PROVIDES')
342 bpn = d.getVar('BPN')
343
344 if target_arch == "allarch":
345 pn = d.getVar('PN')
346 package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
347 return
348
349    # FIXME: Cross packages confuse this check, so just skip them
350 for s in ['cross', 'nativesdk', 'cross-canadian']:
351 if bb.data.inherits_class(s, d):
352 return
353
354 # avoid following links to /usr/bin (e.g. on udev builds)
355 # we will check the files pointed to anyway...
356 if os.path.islink(path):
357 return
358
359    # if this throws an exception, then fix the dict in oe.elf.machine_dict
360 (machine, osabi, abiversion, littleendian, bits) \
361 = oe.elf.machine_dict(d)[target_os][target_arch]
362
363    # Check the architecture and endianness of the binary
364 is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
365 (target_os == "linux-gnux32" or target_os == "linux-muslx32" or \
366 target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
367 is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
368 if not ((machine == elf.machine()) or is_32 or is_bpf):
369 package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
370 (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path, d, name)))
371 elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
372 package_qa_add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
373 (elf.abiSize(), bits, package_qa_clean_path(path, d, name)))
374 elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
375        package_qa_add_message(messages, "arch", "Endianness did not match (%d, expected %d) in %s" % \
376 (elf.isLittleEndian(), littleendian, package_qa_clean_path(path,d, name)))
377
378QAPATHTEST[desktop] = "package_qa_check_desktop"
379def package_qa_check_desktop(path, name, d, elf, messages):
380 """
381 Run all desktop files through desktop-file-validate.
382 """
383 if path.endswith(".desktop"):
384 desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE'),'desktop-file-validate')
385 output = os.popen("%s %s" % (desktop_file_validate, path))
386 # This only produces output on errors
387 for l in output:
388 package_qa_add_message(messages, "desktop", "Desktop file issue: " + l.strip())
389
390QAPATHTEST[textrel] = "package_qa_textrel"
391def package_qa_textrel(path, name, d, elf, messages):
392 """
393 Check if the binary contains relocations in .text
394 """
395
396 if not elf:
397 return
398
399 if os.path.islink(path):
400 return
401
402 phdrs = elf.run_objdump("-p", d)
403 sane = True
404
405 import re
406 textrel_re = re.compile(r"\s+TEXTREL\s+")
407 for line in phdrs.split("\n"):
408 if textrel_re.match(line):
409 sane = False
410 break
411
412 if not sane:
413 path = package_qa_clean_path(path, d, name)
414 package_qa_add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
415
416QAPATHTEST[ldflags] = "package_qa_hash_style"
417def package_qa_hash_style(path, name, d, elf, messages):
418 """
419 Check if the binary has the right hash style...
420 """
421
422 if not elf:
423 return
424
425 if os.path.islink(path):
426 return
427
428 gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS')
429 if not gnu_hash:
430 gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS')
431 if not gnu_hash:
432 return
433
434 sane = False
435 has_syms = False
436
437 phdrs = elf.run_objdump("-p", d)
438
439 # If this binary has symbols, we expect it to have GNU_HASH too.
440 for line in phdrs.split("\n"):
441 if "SYMTAB" in line:
442 has_syms = True
443 if "GNU_HASH" in line or "DT_MIPS_XHASH" in line:
444 sane = True
445 if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
446 sane = True
447 if has_syms and not sane:
448 path = package_qa_clean_path(path, d, name)
449 package_qa_add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
450
451
452QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
453def package_qa_check_buildpaths(path, name, d, elf, messages):
454 """
455    Check for build path references inside target files and report any that are found
456 """
457 # Ignore .debug files, not interesting
458 if path.find(".debug") != -1:
459 return
460
461 # Ignore symlinks
462 if os.path.islink(path):
463 return
464
465 tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
466 with open(path, 'rb') as f:
467 file_content = f.read()
468 if tmpdir in file_content:
469 trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
470 package_qa_add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
471
472
473QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
474def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
475 """
476 Check that all packages containing Xorg drivers have ABI dependencies
477 """
478
479 # Skip dev, dbg or nativesdk packages
480 if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
481 return
482
483 driverdir = d.expand("${libdir}/xorg/modules/drivers/")
484 if driverdir in path and path.endswith(".so"):
485 mlprefix = d.getVar('MLPREFIX') or ''
486 for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name) or ""):
487 if rdep.startswith("%sxorg-abi-" % mlprefix):
488 return
489 package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
490
491QAPATHTEST[infodir] = "package_qa_check_infodir"
492def package_qa_check_infodir(path, name, d, elf, messages):
493 """
494 Check that /usr/share/info/dir isn't shipped in a particular package
495 """
496 infodir = d.expand("${infodir}/dir")
497
498 if infodir in path:
499 package_qa_add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
500
501QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
502def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
503 """
504 Check that the package doesn't contain any absolute symlinks to the sysroot.
505 """
506 if os.path.islink(path):
507 target = os.readlink(path)
508 if os.path.isabs(target):
509 tmpdir = d.getVar('TMPDIR')
510 if target.startswith(tmpdir):
511 trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
512 package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
513
514# Check license variables
515do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
516python populate_lic_qa_checksum() {
517 """
518 Check for changes in the license files.
519 """
520 sane = True
521
522 lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
523 lic = d.getVar('LICENSE')
524 pn = d.getVar('PN')
525
526 if lic == "CLOSED":
527 return
528
529 if not lic_files and d.getVar('SRC_URI'):
530 sane &= package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
531
532 srcdir = d.getVar('S')
533 corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
534 for url in lic_files.split():
535 try:
536 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
537 except bb.fetch.MalformedUrl:
538 sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
539 continue
540 srclicfile = os.path.join(srcdir, path)
541 if not os.path.isfile(srclicfile):
542 sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
543 continue
544
545 if (srclicfile == corebase_licensefile):
546 bb.warn("${COREBASE}/LICENSE is not a valid license file, please use '${COMMON_LICENSE_DIR}/MIT' for a MIT License file in LIC_FILES_CHKSUM. This will become an error in the future")
547
548 recipemd5 = parm.get('md5', '')
549 beginline, endline = 0, 0
550 if 'beginline' in parm:
551 beginline = int(parm['beginline'])
552 if 'endline' in parm:
553 endline = int(parm['endline'])
554
555 if (not beginline) and (not endline):
556 md5chksum = bb.utils.md5_file(srclicfile)
557 with open(srclicfile, 'r', errors='replace') as f:
558 license = f.read().splitlines()
559 else:
560 with open(srclicfile, 'rb') as f:
561 import hashlib
562 lineno = 0
563 license = []
564 m = hashlib.md5()
565 for line in f:
566 lineno += 1
567 if (lineno >= beginline):
568 if ((lineno <= endline) or not endline):
569 m.update(line)
570 license.append(line.decode('utf-8', errors='replace').rstrip())
571 else:
572 break
573 md5chksum = m.hexdigest()
574 if recipemd5 == md5chksum:
575            bb.note(pn + ": md5 checksum matched for " + url)
576 else:
577 if recipemd5:
578 msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
579 msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
580 max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
581 if not license or license[-1] != '':
582 # Ensure that our license text ends with a line break
583 # (will be added with join() below).
584 license.append('')
585 remove = len(license) - max_lines
586 if remove > 0:
587 start = max_lines // 2
588 end = start + remove - 1
589 del license[start:end]
590 license.insert(start, '...')
591 msg = msg + "\n" + pn + ": Here is the selected license text:" + \
592 "\n" + \
593 "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
594 "\n" + "\n".join(license) + \
595 "{:^^70}".format(" endline=%d " % endline if endline else "")
596 if beginline:
597 if endline:
598 srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
599 else:
600 srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
601 elif endline:
602 srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
603 else:
604 srcfiledesc = srclicfile
605 msg = msg + "\n" + pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)
606
607 else:
608 msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
609 msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
610 sane &= package_qa_handle_error("license-checksum", msg, d)
611
612 if not sane:
613 bb.fatal("Fatal QA errors found, failing task.")
614}
615
616def qa_check_staged(path,d):
617 """
618 Check staged la and pc files for common problems like references to the work
619 directory.
620
621    As this is run after every stage, we should be able to easily find the one
622    responsible for the errors, even though we look at every .pc and .la file.
623 """
624
625 sane = True
626 tmpdir = d.getVar('TMPDIR')
627 workdir = os.path.join(tmpdir, "work")
628 recipesysroot = d.getVar("RECIPE_SYSROOT")
629
630 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
631 pkgconfigcheck = workdir
632 else:
633 pkgconfigcheck = tmpdir
634
635 skip = (d.getVar('INSANE_SKIP') or "").split()
636 skip_la = False
637 if 'la' in skip:
638 bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
639 skip_la = True
640
641 skip_pkgconfig = False
642 if 'pkgconfig' in skip:
643 bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
644 skip_pkgconfig = True
645
646 # find all .la and .pc files
647 # read the content
648 # and check for stuff that looks wrong
649 for root, dirs, files in os.walk(path):
650 for file in files:
651 path = os.path.join(root,file)
652 if file.endswith(".la") and not skip_la:
653 with open(path) as f:
654 file_content = f.read()
655 file_content = file_content.replace(recipesysroot, "")
656 if workdir in file_content:
657 error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
658 sane &= package_qa_handle_error("la", error_msg, d)
659 elif file.endswith(".pc") and not skip_pkgconfig:
660 with open(path) as f:
661 file_content = f.read()
662 file_content = file_content.replace(recipesysroot, "")
663 if pkgconfigcheck in file_content:
664 error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
665 sane &= package_qa_handle_error("pkgconfig", error_msg, d)
666
667 return sane
668
669# Run all package-wide warnfuncs and errorfuncs
670def package_qa_package(warnfuncs, errorfuncs, package, d):
671 warnings = {}
672 errors = {}
673
674 for func in warnfuncs:
675 func(package, d, warnings)
676 for func in errorfuncs:
677 func(package, d, errors)
678
679 for w in warnings:
680 package_qa_handle_error(w, warnings[w], d)
681 for e in errors:
682 package_qa_handle_error(e, errors[e], d)
683
684 return len(errors) == 0
685
686# Run all recipe-wide warnfuncs and errorfuncs
687def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
688 warnings = {}
689 errors = {}
690
691 for func in warnfuncs:
692 func(pn, d, warnings)
693 for func in errorfuncs:
694 func(pn, d, errors)
695
696 for w in warnings:
697 package_qa_handle_error(w, warnings[w], d)
698 for e in errors:
699 package_qa_handle_error(e, errors[e], d)
700
701 return len(errors) == 0
702
703# Walk over all files in a directory and call func
704def package_qa_walk(warnfuncs, errorfuncs, package, d):
705 import oe.qa
706
707 #if this will throw an exception, then fix the dict above
708 target_os = d.getVar('TARGET_OS')
709 target_arch = d.getVar('TARGET_ARCH')
710
711 warnings = {}
712 errors = {}
713 for path in pkgfiles[package]:
714 elf = None
715 if os.path.isfile(path):
716 elf = oe.qa.ELFFile(path)
717 try:
718 elf.open()
719 except oe.qa.NotELFFileError:
720 elf = None
721 for func in warnfuncs:
722 func(path, package, d, elf, warnings)
723 for func in errorfuncs:
724 func(path, package, d, elf, errors)
725
726 for w in warnings:
727 package_qa_handle_error(w, warnings[w], d)
728 for e in errors:
729 package_qa_handle_error(e, errors[e], d)
730
731def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
732    # Don't do this check for kernel/module recipes; there aren't too many debug/development
733    # packages and you can get false positives e.g. on kernel-module-lirc-dev
734 if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
735 return
736
737 if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
738 localdata = bb.data.createCopy(d)
739 localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES') + ':' + pkg)
740
741 # Now check the RDEPENDS
742 rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS') or "")
743
744 # Now do the sanity check!!!
745 if "build-deps" not in skip:
746 for rdepend in rdepends:
747 if "-dbg" in rdepend and "debug-deps" not in skip:
748 error_msg = "%s rdepends on %s" % (pkg,rdepend)
749 package_qa_handle_error("debug-deps", error_msg, d)
750 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
751 error_msg = "%s rdepends on %s" % (pkg, rdepend)
752 package_qa_handle_error("dev-deps", error_msg, d)
753 if rdepend not in packages:
754 rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
755 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
756 continue
757 if not rdep_data or not 'PN' in rdep_data:
758 pkgdata_dir = d.getVar("PKGDATA_DIR")
759 try:
760 possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
761 except OSError:
762 possibles = []
763 for p in possibles:
764 rdep_data = oe.packagedata.read_subpkgdata(p, d)
765 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
766 break
767 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
768 continue
769 if rdep_data and 'PN' in rdep_data:
770 error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
771 else:
772 error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
773 package_qa_handle_error("build-deps", error_msg, d)
774
775 if "file-rdeps" not in skip:
776 ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
777 if bb.data.inherits_class('nativesdk', d):
778 ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl', 'perl'])
779            # Save the FILERDEPENDS
780 filerdepends = {}
781 rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
782 for key in rdep_data:
783 if key.startswith("FILERDEPENDS_"):
784 for subkey in bb.utils.explode_deps(rdep_data[key]):
785 if subkey not in ignored_file_rdeps and \
786 not subkey.startswith('perl('):
787 # We already know it starts with FILERDEPENDS_
788 filerdepends[subkey] = key[13:]
789
790 if filerdepends:
791 done = rdepends[:]
792 # Add the rprovides of itself
793 if pkg not in done:
794 done.insert(0, pkg)
795
796            # python itself is not a package, but python-core provides it, so
797            # skip checking /usr/bin/python if python is in the rdeps, in
798            # case there is a RDEPENDS_pkg = "python" in the recipe.
799 for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
800 if py in done:
801 filerdepends.pop("/usr/bin/python",None)
802 done.remove(py)
803 for rdep in done:
804 # The file dependencies may contain package names, e.g.,
805 # perl
806 filerdepends.pop(rdep,None)
807
808                # Save the FILERPROVIDES, RPROVIDES and FILES_INFO
809 rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
810 for key in rdep_data:
811 if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
812 for subkey in bb.utils.explode_deps(rdep_data[key]):
813 filerdepends.pop(subkey,None)
814 # Add the files list to the rprovides
815 if key == "FILES_INFO":
816                    # Use eval() to turn it into a dict
817 for subkey in eval(rdep_data[key]):
818 filerdepends.pop(subkey,None)
819 if not filerdepends:
820 # Break if all the file rdepends are met
821 break
822 if filerdepends:
823 for key in filerdepends:
824 error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
825 (filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
826 package_qa_handle_error("file-rdeps", error_msg, d)
827package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
828
829def package_qa_check_deps(pkg, pkgdest, d):
830
831 localdata = bb.data.createCopy(d)
832 localdata.setVar('OVERRIDES', pkg)
833
834 def check_valid_deps(var):
835 try:
836 rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
837 except ValueError as e:
838 bb.fatal("%s_%s: %s" % (var, pkg, e))
839 for dep in rvar:
840 for v in rvar[dep]:
841 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
842 error_msg = "%s_%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
843 package_qa_handle_error("dep-cmp", error_msg, d)
844
845 check_valid_deps('RDEPENDS')
846 check_valid_deps('RRECOMMENDS')
847 check_valid_deps('RSUGGESTS')
848 check_valid_deps('RPROVIDES')
849 check_valid_deps('RREPLACES')
850 check_valid_deps('RCONFLICTS')
851
852QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
853def package_qa_check_usrmerge(pkg, d, messages):
854 pkgdest = d.getVar('PKGDEST')
855 pkg_dir = pkgdest + os.sep + pkg + os.sep
856 merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
857 for f in merged_dirs:
858 if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
859 msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
860 package_qa_add_message(messages, "usrmerge", msg)
861 return False
862 return True
863
864QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
865def package_qa_check_perllocalpod(pkg, d, messages):
866 """
867    Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
868 installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
869 handle this for most recipes.
870 """
871 import glob
872 pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
873 podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
874
875 matches = glob.glob(podpath)
876 if matches:
877 matches = [package_qa_clean_path(path, d, pkg) for path in matches]
878 msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
879 package_qa_add_message(messages, "perllocalpod", msg)
880
881QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
882def package_qa_check_expanded_d(package, d, messages):
883 """
884    Check for the expanded D (${D}) value in pkg_* and FILES
885    variables and warn the user to use it correctly.
886 """
887 sane = True
888 expanded_d = d.getVar('D')
889
890 for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
891 bbvar = d.getVar(var + "_" + package) or ""
892 if expanded_d in bbvar:
893 if var == 'FILES':
894 package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
895 sane = False
896 else:
897 package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
898 sane = False
899 return sane
900
901QAPKGTEST[unlisted-pkg-lics] = "package_qa_check_unlisted_pkg_lics"
902def package_qa_check_unlisted_pkg_lics(package, d, messages):
903 """
904 Check that all licenses for a package are among the licenses for the recipe.
905 """
906 pkg_lics = d.getVar('LICENSE_' + package)
907 if not pkg_lics:
908 return True
909
910 recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
911 unlisted = oe.license.list_licenses(pkg_lics) - recipe_lics_set
912 if not unlisted:
913 return True
914
915 package_qa_add_message(messages, "unlisted-pkg-lics",
916 "LICENSE_%s includes licenses (%s) that are not "
917 "listed in LICENSE" % (package, ' '.join(unlisted)))
918 return False
919
920def package_qa_check_encoding(keys, encode, d):
921 def check_encoding(key, enc):
922 sane = True
923 value = d.getVar(key)
924 if value:
925 try:
926 s = value.encode(enc)
927            except UnicodeEncodeError as e:
928 error_msg = "%s has non %s characters" % (key,enc)
929 sane = False
930 package_qa_handle_error("invalid-chars", error_msg, d)
931 return sane
932
933 for key in keys:
934 sane = check_encoding(key, encode)
935 if not sane:
936 break
937
938HOST_USER_UID := "${@os.getuid()}"
939HOST_USER_GID := "${@os.getgid()}"
940
941QAPATHTEST[host-user-contaminated] = "package_qa_check_host_user"
942def package_qa_check_host_user(path, name, d, elf, messages):
943 """Check for paths outside of /home which are owned by the user running bitbake."""
944
945 if not os.path.lexists(path):
946 return
947
948 dest = d.getVar('PKGDEST')
949 pn = d.getVar('PN')
950 home = os.path.join(dest, 'home')
951 if path == home or path.startswith(home + os.sep):
952 return
953
954 try:
955 stat = os.lstat(path)
956 except OSError as exc:
957 import errno
958 if exc.errno != errno.ENOENT:
959 raise
960 else:
961 check_uid = int(d.getVar('HOST_USER_UID'))
962 if stat.st_uid == check_uid:
963 package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
964 return False
965
966 check_gid = int(d.getVar('HOST_USER_GID'))
967 if stat.st_gid == check_gid:
968 package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
969 return False
970 return True
971
972QARECIPETEST[src-uri-bad] = "package_qa_check_src_uri"
973def package_qa_check_src_uri(pn, d, messages):
974 import re
975
976 if "${PN}" in d.getVar("SRC_URI", False):
977 package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
978
979 for url in d.getVar("SRC_URI").split():
980 if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url):
981 package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
982
983QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
984def package_qa_check_unhandled_features_check(pn, d, messages):
985 if not bb.data.inherits_class('features_check', d):
986 var_set = False
987 for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
988 for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
989 if d.getVar(var) is not None or d.overridedata.get(var) is not None:
990 var_set = True
991 if var_set:
992 package_qa_handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
993
994QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
995def package_qa_check_missing_update_alternatives(pn, d, messages):
996    # Look at all packages and find out if any of them sets the ALTERNATIVE variable
997    # without inheriting the update-alternatives class
998 for pkg in (d.getVar('PACKAGES') or '').split():
999 if d.getVar('ALTERNATIVE_%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
1000 package_qa_handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE_%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
1001
1002# The PACKAGE FUNC to scan each package
1003python do_package_qa () {
1004 import subprocess
1005 import oe.packagedata
1006
1007 bb.note("DO PACKAGE QA")
1008
1009 bb.build.exec_func("read_subpackage_metadata", d)
1010
1011    # Check for non-UTF-8 characters in the recipe's metadata
1012 package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
1013
1014 logdir = d.getVar('T')
1015 pn = d.getVar('PN')
1016
1017 # Check the compile log for host contamination
1018 compilelog = os.path.join(logdir,"log.do_compile")
1019
1020 if os.path.exists(compilelog):
1021 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
1022 if subprocess.call(statement, shell=True) == 0:
1023 msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
1024 Please check the log '%s' for more information." % (pn, compilelog)
1025 package_qa_handle_error("compile-host-path", msg, d)
1026
1027 # Check the install log for host contamination
1028 installlog = os.path.join(logdir,"log.do_install")
1029
1030 if os.path.exists(installlog):
1031 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
1032 if subprocess.call(statement, shell=True) == 0:
1033 msg = "%s: The install log indicates that host include and/or library paths were used.\n \
1034 Please check the log '%s' for more information." % (pn, installlog)
1035 package_qa_handle_error("install-host-path", msg, d)
1036
1037 # Scan the packages...
1038 pkgdest = d.getVar('PKGDEST')
1039 packages = set((d.getVar('PACKAGES') or '').split())
1040
1041 global pkgfiles
1042 pkgfiles = {}
1043 for pkg in packages:
1044 pkgfiles[pkg] = []
1045 pkgdir = os.path.join(pkgdest, pkg)
1046 for walkroot, dirs, files in os.walk(pkgdir):
1047 # Don't walk into top-level CONTROL or DEBIAN directories as these
1048 # are temporary directories created by do_package.
1049 if walkroot == pkgdir:
1050 for control in ("CONTROL", "DEBIAN"):
1051 if control in dirs:
1052 dirs.remove(control)
1053 for file in files:
1054 pkgfiles[pkg].append(os.path.join(walkroot, file))
1055
1056    # nothing to do if there are no packages to scan
1057 if not packages:
1058 return
1059
1060 import re
1061    # Package names must match the [a-z0-9.+-]+ regular expression
1062 pkgname_pattern = re.compile(r"^[a-z0-9.+-]+$")
1063
1064 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
1065 taskdeps = set()
1066 for dep in taskdepdata:
1067 taskdeps.add(taskdepdata[dep][0])
1068
1069 def parse_test_matrix(matrix_name):
1070 testmatrix = d.getVarFlags(matrix_name) or {}
1071 g = globals()
1072 warnchecks = []
1073 for w in (d.getVar("WARN_QA") or "").split():
1074 if w in skip:
1075 continue
1076 if w in testmatrix and testmatrix[w] in g:
1077 warnchecks.append(g[testmatrix[w]])
1078
1079 errorchecks = []
1080 for e in (d.getVar("ERROR_QA") or "").split():
1081 if e in skip:
1082 continue
1083 if e in testmatrix and testmatrix[e] in g:
1084 errorchecks.append(g[testmatrix[e]])
1085 return warnchecks, errorchecks
1086
1087 for package in packages:
1088 skip = set((d.getVar('INSANE_SKIP') or "").split() +
1089 (d.getVar('INSANE_SKIP_' + package) or "").split())
1090 if skip:
1091 bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
1092
1093 bb.note("Checking Package: %s" % package)
1094 # Check package name
1095 if not pkgname_pattern.match(package):
1096 package_qa_handle_error("pkgname",
1097 "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
1098
1099 warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
1100 package_qa_walk(warn_checks, error_checks, package, d)
1101
1102 warn_checks, error_checks = parse_test_matrix("QAPKGTEST")
1103 package_qa_package(warn_checks, error_checks, package, d)
1104
1105 package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
1106 package_qa_check_deps(package, pkgdest, d)
1107
1108 warn_checks, error_checks = parse_test_matrix("QARECIPETEST")
1109 package_qa_recipe(warn_checks, error_checks, pn, d)
1110
1111 if 'libdir' in d.getVar("ALL_QA").split():
1112 package_qa_check_libdir(d)
1113
1114 qa_sane = d.getVar("QA_SANE")
1115 if not qa_sane:
1116 bb.fatal("QA run found fatal errors. Please consider fixing them.")
1117 bb.note("DONE with PACKAGE QA")
1118}
1119
1120# binutils is used for most checks, so it needs to be set as a dependency.
1121# POPULATESYSROOTDEPS is defined in the staging class.
1122do_package_qa[depends] += "${POPULATESYSROOTDEPS}"
1123do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
1124do_package_qa[rdeptask] = "do_packagedata"
1125addtask do_package_qa after do_packagedata do_package before do_build
1126
1127# Add the package specific INSANE_SKIPs to the sstate dependencies
1128python() {
1129 pkgs = (d.getVar('PACKAGES') or '').split()
1130 for pkg in pkgs:
1131 d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP_{}".format(pkg))
1132}
1133
1134SSTATETASKS += "do_package_qa"
1135do_package_qa[sstate-inputdirs] = ""
1136do_package_qa[sstate-outputdirs] = ""
1137python do_package_qa_setscene () {
1138 sstate_setscene(d)
1139}
1140addtask do_package_qa_setscene
1141
1142python do_qa_staging() {
1143 bb.note("QA checking staging")
1144 if not qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
1145 bb.fatal("QA staging was broken by the package built above")
1146}
1147
1148python do_qa_patch() {
1149 import subprocess
1150
1151 ###########################################################################
1152 # Check patch.log for fuzz warnings
1153 #
1154 # Further information on why we check for patch fuzz warnings:
1155 # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
1156 # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
1157 ###########################################################################
1158
1159 logdir = d.getVar('T')
1160 patchlog = os.path.join(logdir,"log.do_patch")
1161
1162 if os.path.exists(patchlog):
1163 fuzzheader = '--- Patch fuzz start ---'
1164 fuzzfooter = '--- Patch fuzz end ---'
1165 statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
1166 if subprocess.call(statement, shell=True) == 0:
1167 msg = "Fuzz detected:\n\n"
1168 fuzzmsg = ""
1169 inFuzzInfo = False
1170 f = open(patchlog, "r")
1171 for line in f:
1172 if fuzzheader in line:
1173 inFuzzInfo = True
1174 fuzzmsg = ""
1175 elif fuzzfooter in line:
1176 fuzzmsg = fuzzmsg.replace('\n\n', '\n')
1177 msg += fuzzmsg
1178 msg += "\n"
1179 inFuzzInfo = False
1180 elif inFuzzInfo and 'Now at patch' not in line:
1181 fuzzmsg += line
1182 f.close()
1183 msg += "The context lines in the patches can be updated with devtool:\n"
1184 msg += "\n"
1185 msg += " devtool modify %s\n" % d.getVar('PN')
1186 msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
1187 msg += "Don't forget to review changes done by devtool!\n"
1188 if 'patch-fuzz' in d.getVar('ERROR_QA'):
1189 bb.error(msg)
1190 elif 'patch-fuzz' in d.getVar('WARN_QA'):
1191 bb.warn(msg)
1192 msg = "Patch log indicates that patches do not apply cleanly."
1193 package_qa_handle_error("patch-fuzz", msg, d)
1194}
1195
1196python do_qa_configure() {
1197 import subprocess
1198
1199 ###########################################################################
1200 # Check config.log for cross compile issues
1201 ###########################################################################
1202
1203 configs = []
1204 workdir = d.getVar('WORKDIR')
1205
1206 skip = (d.getVar('INSANE_SKIP') or "").split()
1207 skip_configure_unsafe = False
1208 if 'configure-unsafe' in skip:
1209 bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
1210 skip_configure_unsafe = True
1211
1212 if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
1213 bb.note("Checking autotools environment for common misconfiguration")
1214 for root, dirs, files in os.walk(workdir):
1215 statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
1216 os.path.join(root,"config.log")
1217 if "config.log" in files:
1218 if subprocess.call(statement, shell=True) == 0:
1219 error_msg = """This autoconf log indicates errors: it looked at host include and/or library paths while determining system capabilities.
1220Rerun configure task after fixing this."""
1221 package_qa_handle_error("configure-unsafe", error_msg, d)
1222
1223 if "configure.ac" in files:
1224 configs.append(os.path.join(root,"configure.ac"))
1225 if "configure.in" in files:
1226 configs.append(os.path.join(root, "configure.in"))
1227
1228 ###########################################################################
1229 # Check gettext configuration and dependencies are correct
1230 ###########################################################################
1231
1232 skip_configure_gettext = False
1233 if 'configure-gettext' in skip:
1234 bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
1235 skip_configure_gettext = True
1236
1237 cnf = d.getVar('EXTRA_OECONF') or ""
1238 if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
1239 "--disable-nls" in cnf or skip_configure_gettext):
1240 ml = d.getVar("MLPREFIX") or ""
1241 if bb.data.inherits_class('cross-canadian', d):
1242 gt = "nativesdk-gettext"
1243 else:
1244 gt = "gettext-native"
1245 deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
1246 if gt not in deps:
1247 for config in configs:
1248 gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
1249 if subprocess.call(gnu, shell=True) == 0:
1250 error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
1251 package_qa_handle_error("configure-gettext", error_msg, d)
1252
1253 ###########################################################################
1254# Check for unrecognised configure options (with a whitelist)
1255 ###########################################################################
1256 if bb.data.inherits_class("autotools", d) or bb.data.inherits_class("meson", d):
1257 bb.note("Checking configure output for unrecognised options")
1258 try:
1259 if bb.data.inherits_class("autotools", d):
1260 flag = "WARNING: unrecognized options:"
1261 log = os.path.join(d.getVar('B'), 'config.log')
1262 if bb.data.inherits_class("meson", d):
1263 flag = "WARNING: Unknown options:"
1264 log = os.path.join(d.getVar('T'), 'log.do_configure')
1265 output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
1266 options = set()
1267 for line in output.splitlines():
1268 options |= set(line.partition(flag)[2].split())
1269 whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST").split())
1270 options -= whitelist
1271 if options:
1272 pn = d.getVar('PN')
1273 error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
1274 package_qa_handle_error("unknown-configure-option", error_msg, d)
1275 except subprocess.CalledProcessError:
1276 pass
1277
1278 # Check invalid PACKAGECONFIG
1279 pkgconfig = (d.getVar("PACKAGECONFIG") or "").split()
1280 if pkgconfig:
1281 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
1282 for pconfig in pkgconfig:
1283 if pconfig not in pkgconfigflags:
1284 pn = d.getVar('PN')
1285 error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
1286 package_qa_handle_error("invalid-packageconfig", error_msg, d)
1287
1288 qa_sane = d.getVar("QA_SANE")
1289 if not qa_sane:
1290 bb.fatal("Fatal QA errors found, failing task.")
1291}
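# A hedged sketch of how a recipe would typically address the checks above
# (variable values are illustrative only, not taken from any real recipe):
#
#   # configure-gettext: either inherit the gettext class or disable NLS
#   inherit gettext
#   # or: EXTRA_OECONF += "--disable-nls"
#
#   # invalid-packageconfig: every item listed in PACKAGECONFIG needs a
#   # matching flag definition, e.g.
#   PACKAGECONFIG ??= "foo"
#   PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,libfoo"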
1292
1293python do_qa_unpack() {
1294 src_uri = d.getVar('SRC_URI')
1295 s_dir = d.getVar('S')
1296 if src_uri and not os.path.exists(s_dir):
1297 bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
1298}
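# A hedged example of the fix the warning above asks for (the path is
# illustrative): when the unpacked source does not land in the usual
# default of ${WORKDIR}/${BP}, the recipe should point S at the real
# location, for instance for a git fetch:
#
#   S = "${WORKDIR}/git"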
1299
1300# The staging function, to check everything staged into the sysroot
1301#addtask qa_staging after do_populate_sysroot before do_build
1302do_populate_sysroot[postfuncs] += "do_qa_staging "
1303
1304# Check for patch fuzz
1305do_patch[postfuncs] += "do_qa_patch "
1306
1307# Check for broken config.log files and for packages requiring Gettext
1308# which don't have it in DEPENDS.
1309#addtask qa_configure after do_configure before do_compile
1310do_configure[postfuncs] += "do_qa_configure "
1311
1312# Check whether S exists.
1313do_unpack[postfuncs] += "do_qa_unpack"
1314
1315python () {
1316 import re
1317
1318 tests = d.getVar('ALL_QA').split()
1319 if "desktop" in tests:
1320 d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
1321
1322 ###########################################################################
1323 # Check various variables
1324 ###########################################################################
1325
1326 # Checking ${FILESEXTRAPATHS}
1327 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
1328 if '__default' not in extrapaths.split(":"):
1329 msg = "The FILESEXTRAPATHS variable must always use the _prepend (or _append)\n"
1330 msg += "type of assignment, and don't forget the colon.\n"
1331 msg += "Please assign it with the format of:\n"
1332 msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
1333 msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
1334 msg += "in your bbappend file\n\n"
1335 msg += "Your incorrect assignment is:\n"
1336 msg += "%s\n" % extrapaths
1337 bb.warn(msg)
1338
1339 overrides = d.getVar('OVERRIDES').split(':')
1340 pn = d.getVar('PN')
1341 if pn in overrides:
1342 msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
1343 package_qa_handle_error("pn-overrides", msg, d)
1344 prog = re.compile(r'[A-Z]')
1345 if prog.search(pn):
1346 package_qa_handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
1347
1348 # Some people mistakenly use DEPENDS_${PN} instead of DEPENDS and wonder
1349 # why it doesn't work.
1350 if (d.getVar(d.expand('DEPENDS_${PN}'))):
1351 package_qa_handle_error("pkgvarcheck", "recipe uses DEPENDS_${PN}, should use DEPENDS", d)
1352
1353 issues = []
1354 if (d.getVar('PACKAGES') or "").split():
1355 for dep in (d.getVar('QADEPENDS') or "").split():
1356 d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
1357 for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
1358 if d.getVar(var, False):
1359 issues.append(var)
1360
1361 fakeroot_tests = d.getVar('FAKEROOT_QA').split()
1362 if set(tests) & set(fakeroot_tests):
1363 d.setVarFlag('do_package_qa', 'fakeroot', '1')
1364 d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
1365 else:
1366 d.setVarFlag('do_package_qa', 'rdeptask', '')
1367 for i in issues:
1368 package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
1369
1370 if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
1371 for native_class in ['native', 'nativesdk']:
1372 if bb.data.inherits_class(native_class, d):
1373
1374 inherited_classes = d.getVar('__inherit_cache', False) or []
1375 needle = os.path.join('classes', native_class)
1376
1377 bbclassextend = (d.getVar('BBCLASSEXTEND') or '').split()
1378 # BBCLASSEXTEND items are always added in the end
1379 skip_classes = bbclassextend
1380 if bb.data.inherits_class('native', d) or 'native' in bbclassextend:
1381 # native also inherits nopackages and relocatable bbclasses
1382 skip_classes.extend(['nopackages', 'relocatable'])
1383
1384 broken_order = []
1385 for class_item in reversed(inherited_classes):
1386 if needle not in class_item:
1387 for extend_item in skip_classes:
1388 if os.path.join('classes', '%s.bbclass' % extend_item) in class_item:
1389 break
1390 else:
1391 pn = d.getVar('PN')
1392 broken_order.append(os.path.basename(class_item))
1393 else:
1394 break
1395 if broken_order:
1396 package_qa_handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
1397 "Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
1398
1399
1400 qa_sane = d.getVar("QA_SANE")
1401 if not qa_sane:
1402 bb.fatal("Fatal QA errors found, failing task.")
1403}
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
deleted file mode 100644
index 07ec242e63..0000000000
--- a/meta/classes/kernel-arch.bbclass
+++ /dev/null
@@ -1,68 +0,0 @@
1#
2# set the ARCH environment variable for kernel compilation (including
3# modules). The return value must match one of the architecture directories
4# in the kernel source "arch" directory
5#
6
7valid_archs = "alpha cris ia64 \
8 i386 x86 \
9 m68knommu m68k ppc powerpc powerpc64 ppc64 \
10 sparc sparc64 \
11 arm aarch64 \
12 m32r mips \
13 sh sh64 um h8300 \
14 parisc s390 v850 \
15 avr32 blackfin \
16 microblaze \
17 nios2 arc riscv xtensa"
18
19def map_kernel_arch(a, d):
20 import re
21
22 valid_archs = d.getVar('valid_archs').split()
23
24 if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
25 elif re.match('arceb$', a): return 'arc'
26 elif re.match('armeb$', a): return 'arm'
27 elif re.match('aarch64$', a): return 'arm64'
28 elif re.match('aarch64_be$', a): return 'arm64'
29 elif re.match('aarch64_ilp32$', a): return 'arm64'
30 elif re.match('aarch64_be_ilp32$', a): return 'arm64'
31 elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
32 elif re.match('mcf', a): return 'm68k'
33 elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
34 elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
35 elif re.match('sh(3|4)$', a): return 'sh'
36 elif re.match('bfin', a): return 'blackfin'
37 elif re.match('microblazee[bl]', a): return 'microblaze'
38 elif a in valid_archs: return a
39 else:
40 if not d.getVar("TARGET_OS").startswith("linux"):
41 return a
42 bb.error("cannot map '%s' to a linux kernel architecture" % a)
43
44export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}"
45
46def map_uboot_arch(a, d):
47 import re
48
49 if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
50 elif re.match('i.86$', a): return 'x86'
51 return a
52
53export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}"
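# For illustration (these values follow from the mapping functions above):
# a TARGET_ARCH of "x86_64" yields ARCH "x86", "aarch64" yields "arm64",
# and "powerpc64" yields ARCH "powerpc" with UBOOT_ARCH "ppc".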
54
55# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
56# specific options necessary for building the kernel and modules.
57TARGET_CC_KERNEL_ARCH ?= ""
58HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
59TARGET_LD_KERNEL_ARCH ?= ""
60HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
61TARGET_AR_KERNEL_ARCH ?= ""
62HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
63
64KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
65KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
66KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
67TOOLCHAIN = "gcc"
68
diff --git a/meta/classes/kernel-artifact-names.bbclass b/meta/classes/kernel-artifact-names.bbclass
deleted file mode 100644
index a65cdddb3e..0000000000
--- a/meta/classes/kernel-artifact-names.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
1##################################################################
2# Specific kernel creation info
3# for recipes/bbclasses which need to reuse some of the kernel
4# artifacts, but aren't kernel recipes themselves
5##################################################################
6
7inherit image-artifact-names
8
9KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
10KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
11
12KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
13KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
14
15KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
16KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
17
18KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
19KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
20
21MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
22MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
23MODULE_TARBALL_DEPLOY ?= "1"
24
25INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}"
26INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}"
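# A hedged expansion example (all values hypothetical): with PKGE "1",
# PKGV "5.10.30", PKGR "r0" and MACHINE "qemux86-64", KERNEL_IMAGE_NAME
# would expand to something like "1-5.10.30-r0-qemux86-64-<datetime>"
# (IMAGE_VERSION_SUFFIX is typically a datetime stamp), while
# KERNEL_IMAGE_LINK_NAME stays the stable "qemux86-64" symlink name.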
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
deleted file mode 100644
index d4f8864200..0000000000
--- a/meta/classes/kernel-devicetree.bbclass
+++ /dev/null
@@ -1,102 +0,0 @@
1# Support for device tree generation
2PACKAGES_append = " \
3 ${KERNEL_PACKAGE_NAME}-devicetree \
4 ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
5"
6FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
7FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
8
9# Generate kernel+devicetree bundle
10KERNEL_DEVICETREE_BUNDLE ?= "0"
11
12# dtc flags passed via DTC_FLAGS env variable
13KERNEL_DTC_FLAGS ?= ""
14
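# A hedged machine-configuration sketch (the dtb name is illustrative):
#
#   KERNEL_DEVICETREE = "vendor-board.dtb"
#   KERNEL_DEVICETREE_BUNDLE = "1"
#   KERNEL_DTC_FLAGS = "-@"
#
# With the bundle enabled and a zImage kernel, a zImage-*.dtb.bin artifact
# is produced and packaged by the functions below.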
15normalize_dtb () {
16 dtb="$1"
17 if echo $dtb | grep -q '/dts/'; then
18 bbwarn "$dtb contains the full path to the dts file, but only the dtb name should be used."
19 dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'`
20 fi
21 echo "$dtb"
22}
23
24get_real_dtb_path_in_kernel () {
25 dtb="$1"
26 dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb"
27 if [ ! -e "$dtb_path" ]; then
28 dtb_path="${B}/arch/${ARCH}/boot/$dtb"
29 fi
30 echo "$dtb_path"
31}
32
33do_configure_append() {
34 if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
35 if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
36 case "${ARCH}" in
37 "arm")
38 config="${B}/.config"
39 if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then
40 bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!'
41 sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config
42 echo "CONFIG_ARM_APPENDED_DTB=y" >> $config
43 echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config
44 fi
45 ;;
46 *)
47 bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'."
48 esac
49 else
50 bberror 'The KERNEL_DEVICETREE_BUNDLE requires the KERNEL_IMAGETYPE to contain zImage.'
51 fi
52 fi
53}
54
55do_compile_append() {
56 if [ -n "${KERNEL_DTC_FLAGS}" ]; then
57 export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
58 fi
59
60 for dtbf in ${KERNEL_DEVICETREE}; do
61 dtb=`normalize_dtb "$dtbf"`
62 oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
63 done
64}
65
66do_install_append() {
67 for dtbf in ${KERNEL_DEVICETREE}; do
68 dtb=`normalize_dtb "$dtbf"`
69 dtb_ext=${dtb##*.}
70 dtb_base_name=`basename $dtb .$dtb_ext`
71 dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
72 install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
73 done
74}
75
76do_deploy_append() {
77 for dtbf in ${KERNEL_DEVICETREE}; do
78 dtb=`normalize_dtb "$dtbf"`
79 dtb_ext=${dtb##*.}
80 dtb_base_name=`basename $dtb .$dtb_ext`
81 install -d $deployDir
82 install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
83 ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
84 ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
85 for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
86 if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
87 cat ${D}/${KERNEL_IMAGEDEST}/$type \
88 $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
89 > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
90 ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
91 $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
92 if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
93 cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
94 $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
95 > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
96 ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
97 $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
98 fi
99 fi
100 done
101 done
102}
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
deleted file mode 100644
index f5082c93df..0000000000
--- a/meta/classes/kernel-fitimage.bbclass
+++ /dev/null
@@ -1,772 +0,0 @@
1inherit kernel-uboot kernel-artifact-names uboot-sign
2
3KERNEL_IMAGETYPE_REPLACEMENT = ""
4
5python __anonymous () {
6 kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
7 if 'fitImage' in kerneltypes.split():
8 depends = d.getVar("DEPENDS")
9 depends = "%s u-boot-tools-native dtc-native" % depends
10 d.setVar("DEPENDS", depends)
11
12 uarch = d.getVar("UBOOT_ARCH")
13 if uarch == "arm64":
14 replacementtype = "Image"
15 elif uarch == "riscv":
16 replacementtype = "Image"
17 elif uarch == "mips":
18 replacementtype = "vmlinuz.bin"
19 elif uarch == "x86":
20 replacementtype = "bzImage"
21 elif uarch == "microblaze":
22 replacementtype = "linux.bin"
23 else:
24 replacementtype = "zImage"
25
26 d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype)
27
28 # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
29# to kernel.bbclass. We have to override it, since we pack zImage
30# (at least for now) into the fitImage.
31 typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
32 if 'fitImage' in typeformake.split():
33 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', replacementtype))
34
35 image = d.getVar('INITRAMFS_IMAGE')
36 if image:
37 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
38
39 # check if there are any dtb providers
40 providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
41 if providerdtb:
42 d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
43 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
44 d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
45
46 # Verified boot will sign the fitImage and append the public key to
47 # U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
48 # the fitImage:
49 if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
50 uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
51 d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
52 if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
53 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
54}
55
56# Options for the device tree compiler passed to mkimage '-D' feature:
57UBOOT_MKIMAGE_DTCOPTS ??= ""
58
59# fitImage Hash Algo
60FIT_HASH_ALG ?= "sha256"
61
62# fitImage Signature Algo
63FIT_SIGN_ALG ?= "rsa2048"
64
65# Generate keys for signing fitImage
66FIT_GENERATE_KEYS ?= "0"
67
68# Size of private key in number of bits
69FIT_SIGN_NUMBITS ?= "2048"
70
71# args to openssl genrsa (Default is just the public exponent)
72FIT_KEY_GENRSA_ARGS ?= "-F4"
73
74# args to openssl req (Default is -batch for non interactive mode and
75# -new for new certificate)
76FIT_KEY_REQ_ARGS ?= "-batch -new"
77
78# Standard format for public key certificate
79FIT_KEY_SIGN_PKCS ?= "-x509"
80
81# Description string
82FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
83
84# Sign individual images as well
85FIT_SIGN_INDIVIDUAL ?= "0"
86
87# mkimage command
88UBOOT_MKIMAGE ?= "uboot-mkimage"
89UBOOT_MKIMAGE_SIGN ?= "${UBOOT_MKIMAGE}"
90
91# Arguments passed to mkimage for signing
92UBOOT_MKIMAGE_SIGN_ARGS ?= ""
93
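# A hedged local.conf/machine sketch for signed fitImages (the key directory
# and key name are illustrative):
#
#   UBOOT_SIGN_ENABLE = "1"
#   UBOOT_SIGN_KEYDIR = "/path/to/keys"
#   UBOOT_SIGN_KEYNAME = "dev"
#   FIT_GENERATE_KEYS = "1"
#   FIT_SIGN_INDIVIDUAL = "1"
#
# With these set, do_generate_rsa_keys below creates the key pair if it is
# missing and mkimage re-signs the assembled fitImage.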
94#
95# Emit the fitImage ITS header
96#
97# $1 ... .its filename
98fitimage_emit_fit_header() {
99 cat << EOF >> ${1}
100/dts-v1/;
101
102/ {
103 description = "${FIT_DESC}";
104 #address-cells = <1>;
105EOF
106}
107
108#
109# Emit the fitImage section bits
110#
111# $1 ... .its filename
112# $2 ... Section bit type: imagestart - image section start
113# confstart - configuration section start
114# sectend - section end
115# fitend - fitimage end
116#
117fitimage_emit_section_maint() {
118 case $2 in
119 imagestart)
120 cat << EOF >> ${1}
121
122 images {
123EOF
124 ;;
125 confstart)
126 cat << EOF >> ${1}
127
128 configurations {
129EOF
130 ;;
131 sectend)
132 cat << EOF >> ${1}
133 };
134EOF
135 ;;
136 fitend)
137 cat << EOF >> ${1}
138};
139EOF
140 ;;
141 esac
142}
143
144#
145# Emit the fitImage ITS kernel section
146#
147# $1 ... .its filename
148# $2 ... Image counter
149# $3 ... Path to kernel image
150# $4 ... Compression type
151fitimage_emit_section_kernel() {
152
153 kernel_csum="${FIT_HASH_ALG}"
154 kernel_sign_algo="${FIT_SIGN_ALG}"
155 kernel_sign_keyname="${UBOOT_SIGN_KEYNAME}"
156
157 ENTRYPOINT="${UBOOT_ENTRYPOINT}"
158 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
159 ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
160 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
161 fi
162
163 cat << EOF >> ${1}
164 kernel-${2} {
165 description = "Linux kernel";
166 data = /incbin/("${3}");
167 type = "kernel";
168 arch = "${UBOOT_ARCH}";
169 os = "linux";
170 compression = "${4}";
171 load = <${UBOOT_LOADADDRESS}>;
172 entry = <${ENTRYPOINT}>;
173 hash-1 {
174 algo = "${kernel_csum}";
175 };
176 };
177EOF
178
179 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${kernel_sign_keyname}" ] ; then
180 sed -i '$ d' ${1}
181 cat << EOF >> ${1}
182 signature-1 {
183 algo = "${kernel_csum},${kernel_sign_algo}";
184 key-name-hint = "${kernel_sign_keyname}";
185 };
186 };
187EOF
188 fi
189}
190
191#
192# Emit the fitImage ITS DTB section
193#
194# $1 ... .its filename
195# $2 ... Image counter
196# $3 ... Path to DTB image
197fitimage_emit_section_dtb() {
198
199 dtb_csum="${FIT_HASH_ALG}"
200 dtb_sign_algo="${FIT_SIGN_ALG}"
201 dtb_sign_keyname="${UBOOT_SIGN_KEYNAME}"
202
203 dtb_loadline=""
204 dtb_ext=${DTB##*.}
205 if [ "${dtb_ext}" = "dtbo" ]; then
206 if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
207 dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
208 fi
209 elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
210 dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
211 fi
212 cat << EOF >> ${1}
213 fdt-${2} {
214 description = "Flattened Device Tree blob";
215 data = /incbin/("${3}");
216 type = "flat_dt";
217 arch = "${UBOOT_ARCH}";
218 compression = "none";
219 ${dtb_loadline}
220 hash-1 {
221 algo = "${dtb_csum}";
222 };
223 };
224EOF
225
226 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${dtb_sign_keyname}" ] ; then
227 sed -i '$ d' ${1}
228 cat << EOF >> ${1}
229 signature-1 {
230 algo = "${dtb_csum},${dtb_sign_algo}";
231 key-name-hint = "${dtb_sign_keyname}";
232 };
233 };
234EOF
235 fi
236}
237
238#
239# Emit the fitImage ITS u-boot script section
240#
241# $1 ... .its filename
242# $2 ... Image counter
243# $3 ... Path to boot script image
244fitimage_emit_section_boot_script() {
245
246 bootscr_csum="${FIT_HASH_ALG}"
247 bootscr_sign_algo="${FIT_SIGN_ALG}"
248 bootscr_sign_keyname="${UBOOT_SIGN_KEYNAME}"
249
250 cat << EOF >> ${1}
251 bootscr@${2} {
252 description = "U-boot script";
253 data = /incbin/("${3}");
254 type = "script";
255 arch = "${UBOOT_ARCH}";
256 compression = "none";
257 hash@1 {
258 algo = "${bootscr_csum}";
259 };
260 };
261EOF
262
263 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${bootscr_sign_keyname}" ] ; then
264 sed -i '$ d' ${1}
265 cat << EOF >> ${1}
266 signature@1 {
267 algo = "${bootscr_csum},${bootscr_sign_algo}";
268 key-name-hint = "${bootscr_sign_keyname}";
269 };
270 };
271EOF
272 fi
273}
274
275#
276# Emit the fitImage ITS setup section
277#
278# $1 ... .its filename
279# $2 ... Image counter
280# $3 ... Path to setup image
281fitimage_emit_section_setup() {
282
283 setup_csum="${FIT_HASH_ALG}"
284
285 cat << EOF >> ${1}
286 setup-${2} {
287 description = "Linux setup.bin";
288 data = /incbin/("${3}");
289 type = "x86_setup";
290 arch = "${UBOOT_ARCH}";
291 os = "linux";
292 compression = "none";
293 load = <0x00090000>;
294 entry = <0x00090000>;
295 hash-1 {
296 algo = "${setup_csum}";
297 };
298 };
299EOF
300}
301
302#
303# Emit the fitImage ITS ramdisk section
304#
305# $1 ... .its filename
306# $2 ... Image counter
307# $3 ... Path to ramdisk image
308fitimage_emit_section_ramdisk() {
309
310 ramdisk_csum="${FIT_HASH_ALG}"
311 ramdisk_sign_algo="${FIT_SIGN_ALG}"
312 ramdisk_sign_keyname="${UBOOT_SIGN_KEYNAME}"
313 ramdisk_loadline=""
314 ramdisk_entryline=""
315
316 if [ -n "${UBOOT_RD_LOADADDRESS}" ]; then
317 ramdisk_loadline="load = <${UBOOT_RD_LOADADDRESS}>;"
318 fi
319 if [ -n "${UBOOT_RD_ENTRYPOINT}" ]; then
320 ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
321 fi
322
323 cat << EOF >> ${1}
324 ramdisk-${2} {
325 description = "${INITRAMFS_IMAGE}";
326 data = /incbin/("${3}");
327 type = "ramdisk";
328 arch = "${UBOOT_ARCH}";
329 os = "linux";
330 compression = "none";
331 ${ramdisk_loadline}
332 ${ramdisk_entryline}
333 hash-1 {
334 algo = "${ramdisk_csum}";
335 };
336 };
337EOF
338
339 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${ramdisk_sign_keyname}" ] ; then
340 sed -i '$ d' ${1}
341 cat << EOF >> ${1}
342 signature-1 {
343 algo = "${ramdisk_csum},${ramdisk_sign_algo}";
344 key-name-hint = "${ramdisk_sign_keyname}";
345 };
346 };
347EOF
348 fi
349}
350
351#
352# Emit the fitImage ITS configuration section
353#
354# $1 ... .its filename
355# $2 ... Linux kernel ID
356# $3 ... DTB image name
357# $4 ... ramdisk ID
358# $5 ... u-boot script ID
359# $6 ... config ID
360# $7 ... default flag
361fitimage_emit_section_config() {
362
363 conf_csum="${FIT_HASH_ALG}"
364 conf_sign_algo="${FIT_SIGN_ALG}"
365 if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
366 conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
367 fi
368
369 its_file="${1}"
370 kernel_id="${2}"
371 dtb_image="${3}"
372 ramdisk_id="${4}"
373 bootscr_id="${5}"
374 config_id="${6}"
375 default_flag="${7}"
376
377 # Test if we have any DTBs at all
378 sep=""
379 conf_desc=""
380 conf_node="conf-"
381 kernel_line=""
382 fdt_line=""
383 ramdisk_line=""
384 bootscr_line=""
385 setup_line=""
386 default_line=""
387
388 # conf node name is selected based on dtb ID if it is present,
389 # otherwise it's selected based on kernel ID
390 if [ -n "${dtb_image}" ]; then
391 conf_node=$conf_node${dtb_image}
392 else
393 conf_node=$conf_node${kernel_id}
394 fi
395
396 if [ -n "${kernel_id}" ]; then
397 conf_desc="Linux kernel"
398 sep=", "
399 kernel_line="kernel = \"kernel-${kernel_id}\";"
400 fi
401
402 if [ -n "${dtb_image}" ]; then
403 conf_desc="${conf_desc}${sep}FDT blob"
404 sep=", "
405 fdt_line="fdt = \"fdt-${dtb_image}\";"
406 fi
407
408 if [ -n "${ramdisk_id}" ]; then
409 conf_desc="${conf_desc}${sep}ramdisk"
410 sep=", "
411 ramdisk_line="ramdisk = \"ramdisk-${ramdisk_id}\";"
412 fi
413
414 if [ -n "${bootscr_id}" ]; then
415 conf_desc="${conf_desc}${sep}u-boot script"
416 sep=", "
417 bootscr_line="bootscr = \"bootscr@${bootscr_id}\";"
418 fi
419
420 if [ -n "${config_id}" ]; then
421 conf_desc="${conf_desc}${sep}setup"
422 setup_line="setup = \"setup-${config_id}\";"
423 fi
424
425 if [ "${default_flag}" = "1" ]; then
426 # default node is selected based on dtb ID if it is present,
427 # otherwise it's selected based on kernel ID
428 if [ -n "${dtb_image}" ]; then
429 default_line="default = \"conf-${dtb_image}\";"
430 else
431 default_line="default = \"conf-${kernel_id}\";"
432 fi
433 fi
434
435 cat << EOF >> ${its_file}
436 ${default_line}
437 $conf_node {
438 description = "${default_flag} ${conf_desc}";
439 ${kernel_line}
440 ${fdt_line}
441 ${ramdisk_line}
442 ${bootscr_line}
443 ${setup_line}
444 hash-1 {
445 algo = "${conf_csum}";
446 };
447EOF
448
449 if [ ! -z "${conf_sign_keyname}" ] ; then
450
451 sign_line="sign-images = "
452 sep=""
453
454 if [ -n "${kernel_id}" ]; then
455 sign_line="${sign_line}${sep}\"kernel\""
456 sep=", "
457 fi
458
459 if [ -n "${dtb_image}" ]; then
460 sign_line="${sign_line}${sep}\"fdt\""
461 sep=", "
462 fi
463
464 if [ -n "${ramdisk_id}" ]; then
465 sign_line="${sign_line}${sep}\"ramdisk\""
466 sep=", "
467 fi
468
469 if [ -n "${bootscr_id}" ]; then
470 sign_line="${sign_line}${sep}\"bootscr\""
471 sep=", "
472 fi
473
474 if [ -n "${config_id}" ]; then
475 sign_line="${sign_line}${sep}\"setup\""
476 fi
477
478 sign_line="${sign_line};"
479
480 cat << EOF >> ${its_file}
481 signature-1 {
482 algo = "${conf_csum},${conf_sign_algo}";
483 key-name-hint = "${conf_sign_keyname}";
484 ${sign_line}
485 };
486EOF
487 fi
488
489 cat << EOF >> ${its_file}
490 };
491EOF
492}
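# For orientation, a hedged example of the node this function emits for a
# kernel-plus-single-DTB configuration (names are illustrative):
#
#   conf-myboard.dtb {
#           description = "1 Linux kernel, FDT blob";
#           kernel = "kernel-1";
#           fdt = "fdt-myboard.dtb";
#           hash-1 {
#                   algo = "sha256";
#           };
#   };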
493
494#
495# Assemble fitImage
496#
497# $1 ... .its filename
498# $2 ... fitImage name
499# $3 ... include ramdisk
500fitimage_assemble() {
501 kernelcount=1
502 dtbcount=""
503 DTBS=""
504 ramdiskcount=${3}
505 setupcount=""
506 bootscr_id=""
507 rm -f ${1} arch/${ARCH}/boot/${2}
508
509 fitimage_emit_fit_header ${1}
510
511 #
512 # Step 1: Prepare a kernel image section.
513 #
514 fitimage_emit_section_maint ${1} imagestart
515
516 uboot_prep_kimage
517
518 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
519 initramfs_bundle_path="arch/"${UBOOT_ARCH}"/boot/"${KERNEL_IMAGETYPE_REPLACEMENT}".initramfs"
520 if [ -e "${initramfs_bundle_path}" ]; then
521
522 #
523 # Include the kernel/rootfs bundle.
524 #
525
526 fitimage_emit_section_kernel ${1} "${kernelcount}" "${initramfs_bundle_path}" "${linux_comp}"
527 else
528 bbwarn "${initramfs_bundle_path} not found."
529 fi
530 else
531 fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
532 fi
533
534 #
535 # Step 2: Prepare a DTB image section
536 #
537
538 if [ -z "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -n "${KERNEL_DEVICETREE}" ]; then
539 dtbcount=1
540 for DTB in ${KERNEL_DEVICETREE}; do
541 if echo ${DTB} | grep -q '/dts/'; then
542 bbwarn "${DTB} contains the full path to the dts file, but only the dtb name should be used."
543 DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
544 fi
545 DTB_PATH="arch/${ARCH}/boot/dts/${DTB}"
546 if [ ! -e "${DTB_PATH}" ]; then
547 DTB_PATH="arch/${ARCH}/boot/${DTB}"
548 fi
549
550 DTB=$(echo "${DTB}" | tr '/' '_')
551 DTBS="${DTBS} ${DTB}"
552 fitimage_emit_section_dtb ${1} ${DTB} ${DTB_PATH}
553 done
554 fi
555
556 if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
557 dtbcount=1
558 for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
559 DTB=$(echo "${DTB}" | tr '/' '_')
560 DTBS="${DTBS} ${DTB}"
561 fitimage_emit_section_dtb ${1} ${DTB} "${EXTERNAL_KERNEL_DEVICETREE}/${DTB}"
562 done
563 fi
564
565 #
566 # Step 3: Prepare a u-boot script section
567 #
568
569 if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
570 if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
571 cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
572 bootscr_id="${UBOOT_ENV_BINARY}"
573 fitimage_emit_section_boot_script ${1} "${bootscr_id}" ${UBOOT_ENV_BINARY}
574 else
575 bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
576 fi
577 fi
578
579 #
580 # Step 4: Prepare a setup section. (For x86)
581 #
582 if [ -e arch/${ARCH}/boot/setup.bin ]; then
583 setupcount=1
584 fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
585 fi
586
587 #
588 # Step 5: Prepare a ramdisk section.
589 #
590 if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
591 # Find and use the first initramfs image archive type we find
592 for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
593 initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
594 echo "Using $initramfs_path"
595 if [ -e "${initramfs_path}" ]; then
596 fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" "${initramfs_path}"
597 break
598 fi
599 done
600 fi
601
602 fitimage_emit_section_maint ${1} sectend
603
604 # Force the first Kernel and DTB in the default config
605 kernelcount=1
606 if [ -n "${dtbcount}" ]; then
607 dtbcount=1
608 fi
609
610 #
611 # Step 6: Prepare a configurations section
612 #
613 fitimage_emit_section_maint ${1} confstart
614
615 # kernel-fitimage.bbclass currently only supports exactly one kernel to be
616 # added to the FIT image, along with 0 or more device trees and
617 # 0 or 1 ramdisk.
618 # It is also possible to include an initramfs bundle (kernel and rootfs in one binary).
619 # When the initramfs bundle is used, the ramdisk is disabled.
620 # If a device tree is to be part of the FIT image, the default
621 # configuration is selected based on the dtbcount. If there is
622 # no dtb present, then the default configuration is selected based on
623 # the kernelcount.
624 if [ -n "${DTBS}" ]; then
625 i=1
626 for DTB in ${DTBS}; do
627 dtb_ext=${DTB##*.}
628 if [ "${dtb_ext}" = "dtbo" ]; then
629 fitimage_emit_section_config ${1} "" "${DTB}" "" "${bootscr_id}" "" "`expr ${i} = ${dtbcount}`"
630 else
631 fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
632 fi
633 i=`expr ${i} + 1`
634 done
635 else
636 defaultconfigcount=1
637 fitimage_emit_section_config ${1} "${kernelcount}" "" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "${defaultconfigcount}"
638 fi
639
640 fitimage_emit_section_maint ${1} sectend
641
642 fitimage_emit_section_maint ${1} fitend
643
644 #
645 # Step 7: Assemble the image
646 #
647 ${UBOOT_MKIMAGE} \
648 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
649 -f ${1} \
650 arch/${ARCH}/boot/${2}
651
652 #
653 # Step 8: Sign the image and add public key to U-Boot dtb
654 #
655 if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
656 add_key_to_u_boot=""
657 if [ -n "${UBOOT_DTB_BINARY}" ]; then
658 # The u-boot.dtb is a symlink to UBOOT_DTB_IMAGE, so we need to copy
659 # both of them without dereferencing the symlink.
660 cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
661 add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
662 fi
663 ${UBOOT_MKIMAGE_SIGN} \
664 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
665 -F -k "${UBOOT_SIGN_KEYDIR}" \
666 $add_key_to_u_boot \
667 -r arch/${ARCH}/boot/${2} \
668 ${UBOOT_MKIMAGE_SIGN_ARGS}
669 fi
670}
671
672do_assemble_fitimage() {
673 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
674 cd ${B}
675 fitimage_assemble fit-image.its fitImage
676 fi
677}
678
679addtask assemble_fitimage before do_install after do_compile
680
681do_assemble_fitimage_initramfs() {
682 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
683 test -n "${INITRAMFS_IMAGE}" ; then
684 cd ${B}
685 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
686 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
687 else
688 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
689 fi
690 fi
691}
692
693addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
694
695do_generate_rsa_keys() {
696 if [ "${UBOOT_SIGN_ENABLE}" = "0" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
697 bbwarn "FIT_GENERATE_KEYS is set to 1 even though UBOOT_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
698 fi
699
700 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
701
702 # Generate keys only if they don't already exist
703 if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
704 [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then
705
706 # make directory if it does not already exist
707 mkdir -p "${UBOOT_SIGN_KEYDIR}"
708
709 echo "Generating RSA private key for signing fitImage"
710 openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
711 "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
712 "${FIT_SIGN_NUMBITS}"
713
714 echo "Generating certificate for signing fitImage"
715 openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
716 -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
717 -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
718 fi
719 fi
720}
721
722addtask generate_rsa_keys before do_assemble_fitimage after do_compile
723
724kernel_do_deploy[vardepsexclude] = "DATETIME"
725kernel_do_deploy_append() {
726 # Update deploy directory
727 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
728
729 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
730 echo "Copying fit-image.its source file..."
731 install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
732 ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
733
734 echo "Copying linux.bin file..."
735 install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
736 ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
737 fi
738
739 if [ -n "${INITRAMFS_IMAGE}" ]; then
740 echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
741 install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
742 ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
743
744 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
745 echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
746 install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
747 ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
748 fi
749 fi
750 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
751 # UBOOT_DTB_IMAGE is a real file, but we can't use
752 # ${UBOOT_DTB_IMAGE} since it contains ${PV}, which refers to
753 # u-boot's version while we are in the kernel environment here.
754 install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
755 fi
756 fi
757}
758
759# The function below performs the following in case of initramfs bundles:
760# - Removes do_assemble_fitimage. FIT generation is done through
761# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
762# and should not be part of the tasks to be executed.
763# - Since do_generate_rsa_keys is inserted by default
764# between do_compile and do_assemble_fitimage, this is
765# not suitable in case of initramfs bundles. do_generate_rsa_keys
766# should be between do_bundle_initramfs and do_assemble_fitimage_initramfs.
767python () {
768 if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
769 bb.build.deltask('do_assemble_fitimage', d)
770 bb.build.deltask('generate_rsa_keys', d)
771 bb.build.addtask('generate_rsa_keys', 'do_assemble_fitimage_initramfs', 'do_bundle_initramfs', d)
772} \ No newline at end of file
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
deleted file mode 100644
index 5d92f3b636..0000000000
--- a/meta/classes/kernel-grub.bbclass
+++ /dev/null
@@ -1,105 +0,0 @@
1#
2# While installing an rpm to update the kernel on a deployed target, it will update
3# the boot area and the boot menu with the new kernel as the priority, but allow
4# you to fall back to the original kernel as well.
5#
6# - In kernel-image's preinstall scriptlet, it backs up the original kernel to avoid
7# a probable conflict with the new one.
8#
9# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
10# set the new kernel as the boot priority.
11#
12
13python __anonymous () {
14 import re
15
16 preinst = '''
17 # Handle potential naming conflicts
18 [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
19 [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
20 if [ -n "$grubcfg" ]; then
21 # Dereference symlink to avoid a conflict with the new kernel name.
22 if grep -q "/KERNEL_IMAGETYPE \+root=" $grubcfg; then
23 if [ -L "$D/boot/KERNEL_IMAGETYPE" ]; then
24 kimage=`realpath $D/boot/KERNEL_IMAGETYPE 2>/dev/null`
25 if [ -f "$D$kimage" ]; then
26 sed -i "s:KERNEL_IMAGETYPE \+root=:${kimage##*/} root=:" $grubcfg
27 fi
28 fi
29 fi
30
31 # Rename old kernel if it conflicts with new kernel name.
32 if grep -q "/KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=" $grubcfg; then
33 if [ -f "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" ]; then
34 timestamp=`date +%s`
35 kimage="$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}-$timestamp-back"
36 sed -i "s:KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
37 mv "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" "$kimage"
38 fi
39 fi
40 fi
41'''
42
43 postinst = '''
44 get_new_grub_cfg() {
45 grubcfg="$1"
46 old_image="$2"
47 title="Update KERNEL_IMAGETYPE-${KERNEL_VERSION}-${PV}"
48 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
49 rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
50 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
51
52 echo "menuentry \"$title\" {"
53 echo " set root=(hd0,1)"
54 echo "$rootfs"
55 echo "}"
56 elif [ "${grubcfg##*/}" = "menu.list" ]; then
57 rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
58 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
59
60 echo "default 0"
61 echo "timeout 30"
62 echo "title $title"
63 echo "root (hd0,0)"
64 echo "$rootfs"
65 fi
66 }
67
68 get_old_grub_cfg() {
69 grubcfg="$1"
70 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
71 cat "$grubcfg"
72 elif [ "${grubcfg##*/}" = "menu.list" ]; then
73 sed -e '/^default/d' -e '/^timeout/d' "$grubcfg"
74 fi
75 }
76
77 if [ -f "$D/boot/grub/grub.cfg" ]; then
78 grubcfg="$D/boot/grub/grub.cfg"
79 old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
80 elif [ -f "$D/boot/grub/menu.list" ]; then
81 grubcfg="$D/boot/grub/menu.list"
82 old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
83 fi
84
85 # Don't update grubcfg at first install while old bzImage doesn't exist.
86 if [ -f "$D/boot/${old_image##*/}" ]; then
87 grubcfgtmp="$grubcfg.tmp"
88 get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
89 get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
90 mv $grubcfgtmp $grubcfg
91 echo "Caution! Updating the kernel may affect kernel modules!"
92 fi
93'''
94
95 imagetypes = d.getVar('KERNEL_IMAGETYPES')
96 imagetypes = re.sub(r'\.gz$', '', imagetypes)
97
98 for type in imagetypes.split():
99 typelower = type.lower()
100 preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
101 postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
102 d.setVar('pkg_preinst_kernel-image-' + typelower + '_append', preinst_append)
103 d.setVar('pkg_postinst_kernel-image-' + typelower + '_prepend', postinst_prepend)
104}
105
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
deleted file mode 100644
index baa32e0a90..0000000000
--- a/meta/classes/kernel-module-split.bbclass
+++ /dev/null
@@ -1,179 +0,0 @@
1pkg_postinst_modules () {
2if [ -z "$D" ]; then
3 depmod -a ${KERNEL_VERSION}
4else
5 # image.bbclass will call depmodwrapper after everything is installed,
6 # no need to do it here as well
7 :
8fi
9}
10
11pkg_postrm_modules () {
12if [ -z "$D" ]; then
13 depmod -a ${KERNEL_VERSION}
14else
15 depmodwrapper -a -b $D ${KERNEL_VERSION}
16fi
17}
18
19autoload_postinst_fragment() {
20if [ x"$D" = "x" ]; then
21 modprobe %s || true
22fi
23}
24
25PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
26
27do_install_append() {
28 install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
29}
30
31PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
32
33KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
34
35KERNEL_MODULE_PACKAGE_PREFIX ?= ""
36KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
37KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1"
38
39python split_kernel_module_packages () {
40 import re
41
42 modinfoexp = re.compile("([^=]+)=(.*)")
43
44 def extract_modinfo(file):
45 import tempfile, subprocess
46 tempfile.tempdir = d.getVar("WORKDIR")
47 compressed = re.match( r'.*\.([xg])z$', file)
48 tf = tempfile.mkstemp()
49 tmpfile = tf[1]
50 if compressed:
51 tmpkofile = tmpfile + ".ko"
52 if compressed.group(1) == 'g':
53 cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
54 subprocess.check_call(cmd, shell=True)
55 elif compressed.group(1) == 'x':
56 cmd = "xz -dc %s > %s" % (file, tmpkofile)
57 subprocess.check_call(cmd, shell=True)
58 else:
59 msg = "Cannot decompress '%s'" % file
60 raise ValueError(msg)
61 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
62 else:
63 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
64 subprocess.check_call(cmd, shell=True)
65 # errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
66 f = open(tmpfile, errors='replace')
67 l = f.read().split("\000")
68 f.close()
69 os.close(tf[0])
70 os.unlink(tmpfile)
71 if compressed:
72 os.unlink(tmpkofile)
73 vals = {}
74 for i in l:
75 m = modinfoexp.match(i)
76 if not m:
77 continue
78 vals[m.group(1)] = m.group(2)
79 return vals
80
81 def frob_metadata(file, pkg, pattern, format, basename):
82 vals = extract_modinfo(file)
83
84 dvar = d.getVar('PKGD')
85
86 # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
87 # appropriate modprobe commands to the postinst
88 autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
89 autoload = d.getVar('module_autoload_%s' % basename)
90 if autoload and autoload == basename:
91 bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
92 if autoload and basename not in autoloadlist:
93 bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
94 if basename in autoloadlist:
95 name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
96 f = open(name, 'w')
97 if autoload:
98 for m in autoload.split():
99 f.write('%s\n' % m)
100 else:
101 f.write('%s\n' % basename)
102 f.close()
103 postinst = d.getVar('pkg_postinst_%s' % pkg)
104 if not postinst:
105 bb.fatal("pkg_postinst_%s not defined" % pkg)
106 postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
107 d.setVar('pkg_postinst_%s' % pkg, postinst)
108
109 # Write out any modconf fragment
110 modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
111 modconf = d.getVar('module_conf_%s' % basename)
112 if modconf and basename in modconflist:
113 name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
114 f = open(name, 'w')
115 f.write("%s\n" % modconf)
116 f.close()
117 elif modconf:
118 bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
119
120 files = d.getVar('FILES_%s' % pkg)
121 files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
122 d.setVar('FILES_%s' % pkg, files)
123
124 conffiles = d.getVar('CONFFILES_%s' % pkg)
125 conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
126 d.setVar('CONFFILES_%s' % pkg, conffiles)
127
128 if "description" in vals:
129 old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
130 d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
131
132 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
133 modinfo_deps = []
134 if "depends" in vals and vals["depends"] != "":
135 for dep in vals["depends"].split(","):
136 on = legitimize_package_name(dep)
137 dependency_pkg = format % on
138 modinfo_deps.append(dependency_pkg)
139 for dep in modinfo_deps:
140 if not dep in rdepends:
141 rdepends[dep] = []
142 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
143
144 # Avoid automatic -dev recommendations for modules ending with -dev.
145 d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
146
147 # Provide virtual package without postfix
148 providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
149 if providevirt == "1":
150 postfix = format.split('%s')[1]
151 d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
152
153 kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
154 kernel_version = d.getVar("KERNEL_VERSION")
155
156 module_regex = r'^(.*)\.k?o(?:\.[xg]z)?$'
157
158 module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
159 module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
160 module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
161
162 postinst = d.getVar('pkg_postinst_modules')
163 postrm = d.getVar('pkg_postrm_modules')
164
165 modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
166 if modules:
167 metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
168 d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
169
170 # If modules-load.d and modprobe.d are empty at this point, remove them to
171 # avoid warnings. os.rmdir only raises an OSError if an empty
172 # directory cannot be removed.
173 dvar = d.getVar('PKGD')
174 for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
175 if len(os.listdir(dir)) == 0:
176 os.rmdir(dir)
177}
178
179do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
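# A hedged usage sketch for the variables consumed above ("mymodule" is a
# hypothetical module name; "i2c-dev" is just an example of a real one):
#
#   KERNEL_MODULE_AUTOLOAD += "i2c-dev"
#   KERNEL_MODULE_PROBECONF += "mymodule"
#   module_conf_mymodule = "options mymodule param=1"
#
# The first line produces /etc/modules-load.d/i2c-dev.conf in the module
# package; the last two write a matching /etc/modprobe.d/mymodule.conf.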
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
deleted file mode 100644
index b1e7ac05c2..0000000000
--- a/meta/classes/kernel-uboot.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1# fitImage kernel compression algorithm
2FIT_KERNEL_COMP_ALG ?= "gzip"
3FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
4
5uboot_prep_kimage() {
6 if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
7 vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
8 linux_suffix=""
9 linux_comp="none"
10 elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
11 rm -f linux.bin
12 cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
13 vmlinux_path=""
14 linux_suffix=""
15 linux_comp="none"
16 else
17 vmlinux_path="vmlinux"
18 linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}"
19 linux_comp="${FIT_KERNEL_COMP_ALG}"
20 fi
21
22 [ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
23
24 if [ "${linux_comp}" != "none" ] ; then
25 gzip -9 linux.bin
26 mv -f "linux.bin${linux_suffix}" linux.bin
27 fi
28
29 echo "${linux_comp}"
30}
diff --git a/meta/classes/kernel-uimage.bbclass b/meta/classes/kernel-uimage.bbclass
deleted file mode 100644
index cedb4fa070..0000000000
--- a/meta/classes/kernel-uimage.bbclass
+++ /dev/null
@@ -1,35 +0,0 @@
1inherit kernel-uboot
2
3python __anonymous () {
4 if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
5 depends = d.getVar("DEPENDS")
6 depends = "%s u-boot-tools-native" % depends
7 d.setVar("DEPENDS", depends)
8
9 # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
10# to kernel.bbclass. We override the variable here, since we need
11# to build uImage using the kernel build system if and only if
12# KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
13# the uImage.
14 if d.getVar("KEEPUIMAGE") != 'yes':
15 typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
16 if "uImage" in typeformake.split():
17 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))
18
19 # Enable building of uImage with mkimage
20 bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d)
21}
22
23do_uboot_mkimage[dirs] += "${B}"
24do_uboot_mkimage() {
25 uboot_prep_kimage
26
27 ENTRYPOINT=${UBOOT_ENTRYPOINT}
28 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
29 ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \
30 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
31 fi
32
33 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
34 rm -f linux.bin
35}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
deleted file mode 100644
index 35587dd564..0000000000
--- a/meta/classes/kernel-yocto.bbclass
+++ /dev/null
@@ -1,670 +0,0 @@
1# remove tasks that modify the source tree in case externalsrc is inherited
2SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
3PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
4PATCH_GIT_USER_NAME ?= "OpenEmbedded"
5
6# The distro or local.conf should set this, but if nobody cares...
7LINUX_KERNEL_TYPE ??= "standard"
8
9# KMETA ?= ""
10KBRANCH ?= "master"
11KMACHINE ?= "${MACHINE}"
12SRCREV_FORMAT ?= "meta_machine"
13
14# LEVELS:
15# 0: no reporting
16# 1: report options that are specified, but not in the final config
17# 2: report options that are not hardware related, but set by a BSP
18KCONF_AUDIT_LEVEL ?= "1"
19KCONF_BSP_AUDIT_LEVEL ?= "0"
20KMETA_AUDIT ?= "yes"
21KMETA_AUDIT_WERROR ?= ""
22
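# A hedged bbappend/recipe sketch for a BSP using these hooks (the branch,
# machine name and revision are illustrative; SRCREV_FORMAT "meta_machine"
# above implies SRC_URI entries named "machine" and "meta"):
#
#   KBRANCH_myboard = "v5.10/standard/base"
#   KMACHINE_myboard = "myboard"
#   LINUX_KERNEL_TYPE = "standard"
#   SRCREV_machine_myboard = "abcdef0123456789abcdef0123456789abcdef01"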
23# returns local (absolute) path names for all valid patches in the
24# src_uri
25def find_patches(d,subdir):
26 patches = src_patches(d)
27 patch_list=[]
28 for p in patches:
29 _, _, local, _, _, parm = bb.fetch.decodeurl(p)
30 # if patchdir has been passed, we won't be able to apply it so skip
31 # the patch for now, and special processing happens later
32 patchdir = ''
33 if "patchdir" in parm:
34 patchdir = parm["patchdir"]
35 if subdir:
36 if subdir == patchdir:
37 patch_list.append(local)
38 else:
39 patch_list.append(local)
40
41 return patch_list
42
43# returns all the elements from the src uri that are .scc files
44def find_sccs(d):
45 sources=src_patches(d, True)
46 sources_list=[]
47 for s in sources:
48 base, ext = os.path.splitext(os.path.basename(s))
49 if ext and ext in [".scc", ".cfg"]:
50 sources_list.append(s)
51 elif base and 'defconfig' in base:
52 sources_list.append(s)
53
54 return sources_list
55
56# check the SRC_URI for git repositories of type "kmeta". Return the name of
57# the repository as it will be found in WORKDIR
58def find_kernel_feature_dirs(d):
59 feature_dirs=[]
60 fetch = bb.fetch2.Fetch([], d)
61 for url in fetch.urls:
62 urldata = fetch.ud[url]
63 parm = urldata.parm
64 type=""
65 if "type" in parm:
66 type = parm["type"]
67 if "destsuffix" in parm:
68 destdir = parm["destsuffix"]
69 if type == "kmeta":
70 feature_dirs.append(destdir)
71
72 return feature_dirs
73
74# find the master/machine source branch. In the same way that the fetcher processes
75# git repositories in the SRC_URI we take the first repo found, first branch.
76def get_machine_branch(d, default):
77 fetch = bb.fetch2.Fetch([], d)
78 for url in fetch.urls:
79 urldata = fetch.ud[url]
80 parm = urldata.parm
81 if "branch" in parm:
82 branches = urldata.parm.get("branch").split(',')
83 btype = urldata.parm.get("type")
84 if btype != "kmeta":
85 return branches[0]
86
87 return default
88
89# returns a list of all directories that are on FILESEXTRAPATHS (and
90# hence available to the build) that contain .scc or .cfg files
91def get_dirs_with_fragments(d):
92 extrapaths = []
93 extrafiles = []
94 extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "")
95 # Remove default flag which was used for checking
96 extrapathsvalue = extrapathsvalue.replace("__default:", "")
97 extrapaths = extrapathsvalue.split(":")
98 for path in extrapaths:
99 if path + ":True" not in extrafiles:
100 extrafiles.append(path + ":" + str(os.path.exists(path)))
101
102 return " ".join(extrafiles)
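
The existence flag matters because the joined string feeds do_kernel_metadata[file-checksums] further below; a simplified sketch of the idea in plain Python (the de-duplication guard is omitted):

    import os

    def dirs_with_fragments(filesextrapaths):
        # Record "<path>:<exists>" for every FILESEXTRAPATHS entry so that the
        # task checksum changes when a fragment directory appears or
        # disappears, forcing do_kernel_metadata to re-run.
        paths = filesextrapaths.replace("__default:", "").split(":")
        return " ".join("%s:%s" % (p, os.path.exists(p)) for p in paths)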
103
104do_kernel_metadata() {
105 set +e
106
107 if [ -n "$1" ]; then
108 mode="$1"
109 else
110 mode="patch"
111 fi
112
113 cd ${S}
114 export KMETA=${KMETA}
115
116 # if kernel tools are available in-tree, they are preferred
117	# and are placed on the path before any external tools, unless
118	# the external tools flag is set, in which case we do nothing.
119 if [ -f "${S}/scripts/util/configme" ]; then
120 if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
121 PATH=${S}/scripts/util:${PATH}
122 fi
123 fi
124
125 # In a similar manner to the kernel itself:
126 #
127 # defconfig: $(obj)/conf
128 # ifeq ($(KBUILD_DEFCONFIG),)
129 # $< --defconfig $(Kconfig)
130 # else
131 # @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
132 # $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
133 # endif
134 #
135 # If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it
136 # from the source tree, into a common location and normalized "defconfig" name,
137	# where the rest of the process will include and incorporate it into the build
138 #
139 # If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
140 # we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
141	# precedence.
142 #
143 if [ -n "${KBUILD_DEFCONFIG}" ]; then
144 if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
145 if [ -f "${WORKDIR}/defconfig" ]; then
146	# If the two defconfigs are different, warn that we overwrote the
147 # one already placed in WORKDIR
148 cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
149 if [ $? -ne 0 ]; then
150 bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
151 fi
152 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
153 else
154 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
155 fi
156 in_tree_defconfig="${WORKDIR}/defconfig"
157 else
158 bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
159 fi
160 fi
161
162 if [ "$mode" = "patch" ]; then
163	# was anyone trying to patch the kernel metadata? We need to do
164 # this here, since the scc commands migrate the .cfg fragments to the
165 # kernel source tree, where they'll be used later.
166 check_git_config
167 patches="${@" ".join(find_patches(d,'kernel-meta'))}"
168 for p in $patches; do
169 (
170 cd ${WORKDIR}/kernel-meta
171 git am -s $p
172 )
173 done
174 fi
175
176 sccs_from_src_uri="${@" ".join(find_sccs(d))}"
177 patches="${@" ".join(find_patches(d,''))}"
178 feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
179
180	# a quick check to make sure we don't have duplicate defconfigs. If
181	# there's a defconfig in the SRC_URI, did we also have one from the
182	# KBUILD_DEFCONFIG processing above?
183 src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ')
184	# drop any defconfigs from the src_uri variable; we captured them just above if they existed
185 sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ')
186
187 if [ -n "$in_tree_defconfig" ]; then
188 sccs_defconfig=$in_tree_defconfig
189 if [ -n "$src_uri_defconfig" ]; then
190 bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI defconfig"
191 fi
192 else
193 # if we didn't have an in-tree one, make our defconfig the one
194 # from the src_uri. Note: there may not have been one from the
195 # src_uri, so this can be an empty variable.
196 sccs_defconfig=$src_uri_defconfig
197 fi
198 sccs="$sccs_from_src_uri"
199
200 # check for feature directories/repos/branches that were part of the
201 # SRC_URI. If they were supplied, we convert them into include directives
202 # for the update part of the process
203 for f in ${feat_dirs}; do
204 if [ -d "${WORKDIR}/$f/meta" ]; then
205 includes="$includes -I${WORKDIR}/$f/kernel-meta"
206 elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
207 includes="$includes -I${WORKDIR}/../oe-local-files/$f"
208 elif [ -d "${WORKDIR}/$f" ]; then
209 includes="$includes -I${WORKDIR}/$f"
210 fi
211 done
212 for s in ${sccs} ${patches}; do
213 sdir=$(dirname $s)
214 includes="$includes -I${sdir}"
215 # if a SRC_URI passed patch or .scc has a subdir of "kernel-meta",
216 # then we add it to the search path
217 if [ -d "${sdir}/kernel-meta" ]; then
218 includes="$includes -I${sdir}/kernel-meta"
219 fi
220 done
221
222 # expand kernel features into their full path equivalents
223 bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
224 if [ -z "$bsp_definition" ]; then
225 if [ -z "$sccs_defconfig" ]; then
226 bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
227 fi
228 else
229 # if the bsp definition has "define KMETA_EXTERNAL_BSP t",
230 # then we need to set a flag that will instruct the next
231 # steps to use the BSP as both configuration and patches.
232 grep -q KMETA_EXTERNAL_BSP $bsp_definition
233 if [ $? -eq 0 ]; then
234 KMETA_EXTERNAL_BSPS="t"
235 fi
236 fi
237 meta_dir=$(kgit --meta)
238
239 KERNEL_FEATURES_FINAL=""
240 if [ -n "${KERNEL_FEATURES}" ]; then
241 for feature in ${KERNEL_FEATURES}; do
242 feature_found=f
243 for d in $includes; do
244 path_to_check=$(echo $d | sed 's/^-I//')
245 if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then
246 feature_found=t
247 fi
248 done
249 if [ "$feature_found" = "f" ]; then
250 if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then
251 bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set"
252 bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue"
253 else
254 bberror "Feature '$feature' not found, this will cause configuration failures."
255 bberror "Check the SRC_URI for meta-data repositories or directories that may be missing"
256 bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
257 fi
258 else
259 KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature"
260 fi
261 done
262 fi
263
264 if [ "$mode" = "config" ]; then
265 # run1: pull all the configuration fragments, no matter where they come from
266 elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
267 if [ -n "${elements}" ]; then
268 echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
269 scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
270 if [ $? -ne 0 ]; then
271 bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
272 fi
273 fi
274 fi
275
276 # if KMETA_EXTERNAL_BSPS has been set, or it has been detected from
277 # the bsp definition, then we inject the bsp_definition into the
278 # patch phase below. we'll piggy back on the sccs variable.
279 if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
280 sccs="${bsp_definition} ${sccs}"
281 fi
282
283 if [ "$mode" = "patch" ]; then
284 # run2: only generate patches for elements that have been passed on the SRC_URI
285 elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
286 if [ -n "${elements}" ]; then
287 scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL
288 if [ $? -ne 0 ]; then
289 bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
290 fi
291 fi
292 fi
293}
294
295do_patch() {
296 set +e
297 cd ${S}
298
299 check_git_config
300 meta_dir=$(kgit --meta)
301 (cd ${meta_dir}; ln -sf patch.queue series)
302 if [ -f "${meta_dir}/series" ]; then
303 kgit-s2q --gen -v --patches .kernel-meta/
304 if [ $? -ne 0 ]; then
305 bberror "Could not apply patches for ${KMACHINE}."
306			bbfatal_log "Patch failures can be resolved in the linux source directory ${S}"
307 fi
308 fi
309
310 if [ -f "${meta_dir}/merge.queue" ]; then
311 # we need to merge all these branches
312 for b in $(cat ${meta_dir}/merge.queue); do
313 git show-ref --verify --quiet refs/heads/${b}
314 if [ $? -eq 0 ]; then
315 bbnote "Merging branch ${b}"
316 git merge -q --no-ff -m "Merge branch ${b}" ${b}
317 else
318 bbfatal "branch ${b} does not exist, cannot merge"
319 fi
320 done
321 fi
322}
323
324do_kernel_checkout() {
325 set +e
326
327 source_dir=`echo ${S} | sed 's%/$%%'`
328 source_workdir="${WORKDIR}/git"
329 if [ -d "${WORKDIR}/git/" ]; then
330 # case: git repository
331 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
332 if [ "${source_dir}" != "${source_workdir}" ]; then
333 if [ -d "${source_workdir}/.git" ]; then
334 # regular git repository with .git
335 rm -rf ${S}
336 mv ${WORKDIR}/git ${S}
337 else
338 # create source for bare cloned git repository
339 git clone ${WORKDIR}/git ${S}
340 rm -rf ${WORKDIR}/git
341 fi
342 fi
343 cd ${S}
344 else
345 # case: we have no git repository at all.
346 # To support low bandwidth options for building the kernel, we'll just
347 # convert the tree to a git repo and let the rest of the process work unchanged
348
349		# if ${S} hasn't been set to the proper subdirectory, a default of "linux" is
350 # used, but we can't initialize that empty directory. So check it and throw a
351 # clear error
352
353 cd ${S}
354 if [ ! -f "Makefile" ]; then
355 bberror "S is not set to the linux source directory. Check "
356 bbfatal "the recipe and set S to the proper extracted subdirectory"
357 fi
358 rm -f .gitignore
359 git init
360 check_git_config
361 git add .
362 git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
363 git clean -d -f
364 fi
365
366 # convert any remote branches to local tracking ones
367 for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
368 b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
369 git show-ref --quiet --verify -- "refs/heads/$b"
370 if [ $? -ne 0 ]; then
371 git branch $b $i > /dev/null
372 fi
373 done
374
375 # Create a working tree copy of the kernel by checking out a branch
376 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
377
378 # checkout and clobber any unimportant files
379 git checkout -f ${machine_branch}
380}
381do_kernel_checkout[dirs] = "${S}"
382
383addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
384addtask kernel_metadata after do_validate_branches do_unpack before do_patch
385do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
386do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
387do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
388
389do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
390do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
391do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
392do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
393do_kernel_configme[dirs] += "${S} ${B}"
394do_kernel_configme() {
395 do_kernel_metadata config
396
397 # translate the kconfig_mode into something that merge_config.sh
398 # understands
399 case ${KCONFIG_MODE} in
400 *allnoconfig)
401 config_flags="-n"
402 ;;
403 *alldefconfig)
404 config_flags=""
405 ;;
406 *)
407 if [ -f ${WORKDIR}/defconfig ]; then
408 config_flags="-n"
409 fi
410 ;;
411 esac
412
413 cd ${S}
414
415 meta_dir=$(kgit --meta)
416 configs="$(scc --configs -o ${meta_dir})"
417 if [ $? -ne 0 ]; then
418 bberror "${configs}"
419 bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
420 fi
421
422 CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
423 if [ $? -ne 0 -o ! -f ${B}/.config ]; then
424 bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
425 if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
426 bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`"
427 else
428 bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log"
429 fi
430 fi
431
432 if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then
433 echo "# Global settings from linux recipe" >> ${B}/.config
434 echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
435 fi
436}
437
438addtask kernel_configme before do_configure after do_patch
439addtask config_analysis
440
441do_config_analysis[depends] = "virtual/kernel:do_configure"
442do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot"
443
444CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt"
445CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt"
446
447python do_config_analysis() {
448 import re, string, sys, subprocess
449
450 s = d.getVar('S')
451
452 env = os.environ.copy()
453 env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
454 env['LD'] = d.getVar('KERNEL_LD')
455 env['CC'] = d.getVar('KERNEL_CC')
456 env['ARCH'] = d.getVar('ARCH')
457 env['srctree'] = s
458
459 # read specific symbols from the kernel recipe or from local.conf
460 # i.e.: CONFIG_ANALYSIS_pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
461 config = d.getVar( 'CONFIG_ANALYSIS' )
462 if not config:
463 config = [ "" ]
464 else:
465 config = config.split()
466
467 for c in config:
468 for action in ["analysis","audit"]:
469 if action == "analysis":
470 try:
471 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
472 except subprocess.CalledProcessError as e:
473 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
474
475 outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
476
477 if action == "audit":
478 try:
479 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
480 except subprocess.CalledProcessError as e:
481 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
482
483 outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
484
485 if c:
486 outdir = os.path.dirname( outfile )
487 outname = os.path.basename( outfile )
488 outfile = outdir + '/'+ c + '-' + outname
489
490 if config and os.path.isfile(outfile):
491 os.remove(outfile)
492
493 with open(outfile, 'w+') as f:
494 f.write( analysis )
495
496 bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile ))
497 if c:
498 bb.warn( analysis )
499}
500
501python do_kernel_configcheck() {
502 import re, string, sys, subprocess
503
504 # if KMETA isn't set globally by a recipe using this routine, we need to
505 # set the default to 'meta'. Otherwise, kconf_check is not passed a valid
506 # meta-series for processing
507 kmeta = d.getVar("KMETA") or "meta"
508 if not os.path.exists(kmeta):
509 kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip()
510
511 s = d.getVar('S')
512
513 env = os.environ.copy()
514 env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
515 env['LD'] = d.getVar('KERNEL_LD')
516 env['CC'] = d.getVar('KERNEL_CC')
517 env['ARCH'] = d.getVar('ARCH')
518 env['srctree'] = s
519
520 try:
521 configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
522 except subprocess.CalledProcessError as e:
523 bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
524
525 config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
526 bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
527 kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or ""
528 warnings_detected = False
529
530 # if config check visibility is "1", that's the lowest level of audit. So
531 # we add the --classify option to the run, since classification will
532 # streamline the output to only report options that could be boot issues,
533 # or are otherwise required for proper operation.
534 extra_params = ""
535 if config_check_visibility == 1:
536 extra_params = "--classify"
537
538 # category #1: mismatches
539 try:
540 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
541 except subprocess.CalledProcessError as e:
542 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
543
544 if analysis:
545 outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
546 if os.path.isfile(outfile):
547 os.remove(outfile)
548 with open(outfile, 'w+') as f:
549 f.write( analysis )
550
551 if config_check_visibility and os.stat(outfile).st_size > 0:
552 with open (outfile, "r") as myfile:
553 results = myfile.read()
554 bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
555 warnings_detected = True
556
557 # category #2: invalid fragment elements
558 extra_params = ""
559 if bsp_check_visibility > 1:
560 extra_params = "--strict"
561 try:
562 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
563 except subprocess.CalledProcessError as e:
564 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
565
566 if analysis:
567 outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
568 if os.path.isfile(outfile):
569 os.remove(outfile)
570 with open(outfile, 'w+') as f:
571 f.write( analysis )
572
573 if bsp_check_visibility and os.stat(outfile).st_size > 0:
574 with open (outfile, "r") as myfile:
575 results = myfile.read()
576 bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results)
577 warnings_detected = True
578
579 # category #3: redefined options (this is pretty verbose and is debug only)
580 try:
581 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
582 except subprocess.CalledProcessError as e:
583 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
584
585 if analysis:
586 outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
587 if os.path.isfile(outfile):
588 os.remove(outfile)
589 with open(outfile, 'w+') as f:
590 f.write( analysis )
591
592        # if the audit level is greater than two, we report if a fragment has overridden
593        # a value from a base fragment. This is really only used for new kernel introduction.
594 if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0:
595 with open (outfile, "r") as myfile:
596 results = myfile.read()
597 bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
598 warnings_detected = True
599
600 if warnings_detected and kmeta_audit_werror:
601 bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" )
602}
603
604# Ensure that the branches (BSP and meta) are at the locations specified by
605# their SRCREV values. If they are NOT on the right commits, the branches
606# are corrected to the proper commit.
607do_validate_branches() {
608 set +e
609 cd ${S}
610
611 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
612 machine_srcrev="${SRCREV_machine}"
613
614	# if SRCREV is AUTOREV it shows up as AUTOINC; there's nothing to
615 # check and we can exit early
616 if [ "${machine_srcrev}" = "AUTOINC" ]; then
617 bbnote "SRCREV validation is not required for AUTOREV"
618 elif [ "${machine_srcrev}" = "" ]; then
619 if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
620 # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
621 # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
622			# this case, we need to reset to the given SRCREV before heading to patching
623 bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
624 force_srcrev="${SRCREV}"
625 fi
626 else
627 git cat-file -t ${machine_srcrev} > /dev/null
628 if [ $? -ne 0 ]; then
629 bberror "${machine_srcrev} is not a valid commit ID."
630 bbfatal_log "The kernel source tree may be out of sync"
631 fi
632 force_srcrev=${machine_srcrev}
633 fi
634
635 git checkout -q -f ${machine_branch}
636 if [ -n "${force_srcrev}" ]; then
637 # see if the branch we are about to patch has been properly reset to the defined
638 # SRCREV .. if not, we reset it.
639 branch_head=`git rev-parse HEAD`
640 if [ "${force_srcrev}" != "${branch_head}" ]; then
641 current_branch=`git rev-parse --abbrev-ref HEAD`
642 git branch "$current_branch-orig"
643 git reset --hard ${force_srcrev}
644			# We've checked out HEAD, so make sure we clean up the kgit-s2q fence-post check
645			# so that the patches are applied as expected, otherwise no patching
646 # would be done in some corner cases.
647 kgit-s2q --clean
648 fi
649 fi
650}
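
A compact sketch of the SRCREV selection above, with plain strings standing in for the BitBake variables and the git commit validation omitted:

    def pick_force_srcrev(machine_srcrev, srcrev):
        # AUTOINC (AUTOREV) needs no validation; an empty SRCREV_machine means
        # a custom recipe, so fall back to the plain SRCREV; otherwise reset
        # the branch to the machine-specific commit.
        if machine_srcrev == "AUTOINC":
            return None
        if machine_srcrev == "":
            if srcrev not in ("AUTOINC", "INVALID"):
                return srcrev
            return None
        return machine_srcrev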
651
652OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
653KBUILD_OUTPUT = "${B}"
654
655python () {
656 # If diffconfig is available, ensure it runs after kernel_configme
657 if 'do_diffconfig' in d:
658 bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
659
660 externalsrc = d.getVar('EXTERNALSRC')
661 if externalsrc:
662 # If we deltask do_patch, do_kernel_configme is left without
663 # dependencies and runs too early
664 d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack'])
665}
666
667# extra tasks
668addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile
669addtask validate_branches before do_patch after do_kernel_checkout
670addtask kernel_configcheck after do_configure before do_compile
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
deleted file mode 100644
index 8693ab86be..0000000000
--- a/meta/classes/kernel.bbclass
+++ /dev/null
@@ -1,782 +0,0 @@
1inherit linux-kernel-base kernel-module-split
2
3COMPATIBLE_HOST = ".*-linux"
4
5KERNEL_PACKAGE_NAME ??= "kernel"
6KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
7
8PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
9DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
10DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
11DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
12PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
13
14do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
15do_clean[depends] += "make-mod-scripts:do_clean"
16
17CVE_PRODUCT ?= "linux_kernel"
18
19S = "${STAGING_KERNEL_DIR}"
20B = "${WORKDIR}/build"
21KBUILD_OUTPUT = "${B}"
22OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
23
24# we include gcc above, we don't need virtual/libc
25INHIBIT_DEFAULT_DEPS = "1"
26
27KERNEL_IMAGETYPE ?= "zImage"
28INITRAMFS_IMAGE ?= ""
29INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
30INITRAMFS_TASK ?= ""
31INITRAMFS_IMAGE_BUNDLE ?= ""
32
33# KERNEL_VERSION is extracted from source code. It is evaluated as
34# None for the first parsing, since the code has not been fetched.
35# After the code is fetched, it will be evaluated as the real version
36# number and cause the kernel to be rebuilt. To avoid this, make
37# KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on
38# LINUX_VERSION which is a constant.
39KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}"
40KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}"
41KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
42KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
43
44python __anonymous () {
45 pn = d.getVar("PN")
46 kpn = d.getVar("KERNEL_PACKAGE_NAME")
47
48 # XXX Remove this after bug 11905 is resolved
49 # FILES_${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
50 if kpn == pn:
51 bb.warn("Some packages (E.g. *-dev) might be missing due to "
52 "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
53
54 # The default kernel recipe builds in a shared location defined by
55 # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR.
56 # Set these variables to directories under ${WORKDIR} in alternate
57 # kernel recipes (I.e. where KERNEL_PACKAGE_NAME != kernel) so that they
58 # may build in parallel with the default kernel without clobbering.
59 if kpn != "kernel":
60 workdir = d.getVar("WORKDIR")
61 sourceDir = os.path.join(workdir, 'kernel-source')
62 artifactsDir = os.path.join(workdir, 'kernel-build-artifacts')
63 d.setVar("STAGING_KERNEL_DIR", sourceDir)
64 d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir)
65
66 # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
67 type = d.getVar('KERNEL_IMAGETYPE') or ""
68 alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
69 types = d.getVar('KERNEL_IMAGETYPES') or ""
70 if type not in types.split():
71 types = (type + ' ' + types).strip()
72 if alttype not in types.split():
73 types = (alttype + ' ' + types).strip()
74 d.setVar('KERNEL_IMAGETYPES', types)
75
76 # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
77 # by the kernel build system and types which are created by post-processing
78 # the output of the kernel build system (e.g. compressing vmlinux ->
79 # vmlinux.gz in kernel_do_compile()).
80 # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
81 # directly by the kernel build system.
82 if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
83 typeformake = set()
84 for type in types.split():
85 if type == 'vmlinux.gz':
86 type = 'vmlinux'
87 typeformake.add(type)
88
89 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake)))
90
91 kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
92 imagedest = d.getVar('KERNEL_IMAGEDEST')
93
94 for type in types.split():
95 typelower = type.lower()
96 d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
97 d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
98 d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
99 d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
100 d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
101 d.setVar('pkg_postinst_%s-image-%s' % (kname,typelower), """set +e
102if [ -n "$D" ]; then
103 ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
104else
105 ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
106 if [ $? -ne 0 ]; then
107 echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)."
108 install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s
109 fi
110fi
111set -e
112""" % (type, type, type, type, type, type, type))
113 d.setVar('pkg_postrm_%s-image-%s' % (kname,typelower), """set +e
114if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
115 rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
116fi
117set -e
118""" % (type, type, type))
119
120
121 image = d.getVar('INITRAMFS_IMAGE')
122    # If INITRAMFS_IMAGE is set but INITRAMFS_IMAGE_BUNDLE is set to 0,
123    # do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
124 # standalone for use by wic and other tools.
125 if image:
126 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
127
128 # NOTE: setting INITRAMFS_TASK is for backward compatibility
129 # The preferred method is to set INITRAMFS_IMAGE, because
130 # this INITRAMFS_TASK has circular dependency problems
131 # if the initramfs requires kernel modules
132 image_task = d.getVar('INITRAMFS_TASK')
133 if image_task:
134 d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
135}
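
To illustrate the per-image-type packaging performed above, a hedged sketch of the package names that would be generated; the version string in the example is hypothetical, and the postinst/postrm bodies are omitted:

    def image_package_names(kname, imagetype, kernel_version_pkg_name):
        # Mirrors the PACKAGES/PKG_ handling above: each image type gets its
        # own package, renamed to include the kernel version.
        typelower = imagetype.lower()
        pkg = "%s-image-%s" % (kname, typelower)
        runtime_name = "%s-%s" % (pkg, kernel_version_pkg_name)
        return pkg, runtime_name

    # e.g. image_package_names("kernel", "zImage", "5.15.0")
    #   -> ("kernel-image-zimage", "kernel-image-zimage-5.15.0")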
136
137# Here we pull in all various kernel image types which we support.
138#
139# In case you're wondering why kernel.bbclass inherits the other image
140# types instead of the other way around, the reason for that is to
141# maintain compatibility with various currently existing meta-layers.
142# By pulling in the various kernel image types here, we retain the
143# original behavior of kernel.bbclass, so no meta-layers should get
144# broken.
145#
146# KERNEL_CLASSES by default pulls in kernel-uimage.bbclass, since this
147# used to be the default behavior when only uImage was supported. This
148# variable can be appended by users who implement support for new kernel
149# image types.
150
151KERNEL_CLASSES ?= " kernel-uimage "
152inherit ${KERNEL_CLASSES}
153
154# Old style kernels may set ${S} = ${WORKDIR}/git for example
155# We need to move these over to STAGING_KERNEL_DIR. We can't just
156# create the symlink in advance as the git fetcher can't cope with
157# the symlink.
158do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
159do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
160python do_symlink_kernsrc () {
161 s = d.getVar("S")
162 if s[-1] == '/':
163 # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
164 s=s[:-1]
165 kernsrc = d.getVar("STAGING_KERNEL_DIR")
166 if s != kernsrc:
167 bb.utils.mkdirhier(kernsrc)
168 bb.utils.remove(kernsrc, recurse=True)
169 if d.getVar("EXTERNALSRC"):
170 # With EXTERNALSRC S will not be wiped so we can symlink to it
171 os.symlink(s, kernsrc)
172 else:
173 import shutil
174 shutil.move(s, kernsrc)
175 os.symlink(kernsrc, s)
176}
177# do_patch is normally ordered before do_configure, but
178# externalsrc.bbclass deletes do_patch, breaking the dependency of
179# do_configure on do_symlink_kernsrc.
180addtask symlink_kernsrc before do_patch do_configure after do_unpack
181
182inherit kernel-arch deploy
183
184PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*"
185PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*"
186PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
187
188export OS = "${TARGET_OS}"
189export CROSS_COMPILE = "${TARGET_PREFIX}"
190export KBUILD_BUILD_VERSION = "1"
191export KBUILD_BUILD_USER ?= "oe-user"
192export KBUILD_BUILD_HOST ?= "oe-host"
193
194KERNEL_RELEASE ?= "${KERNEL_VERSION}"
195
196# The directory where the built kernel lies in the kernel tree
197KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
198KERNEL_IMAGEDEST ?= "boot"
199
200#
201# configuration
202#
203export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"
204
205KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
206
207KERNEL_LOCALVERSION ?= ""
208
209# kernels are generally machine specific
210PACKAGE_ARCH = "${MACHINE_ARCH}"
211
212# U-Boot support
213UBOOT_ENTRYPOINT ?= "20008000"
214UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
215
216# Some Linux kernel configurations need additional parameters on the command line
217KERNEL_EXTRA_ARGS ?= ""
218
219EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
220EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}""
221
222KERNEL_ALT_IMAGETYPE ??= ""
223
224copy_initramfs() {
225 echo "Copying initramfs into ./usr ..."
226 # In case the directory is not created yet from the first pass compile:
227 mkdir -p ${B}/usr
228 # Find and use the first initramfs image archive type we find
229 rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
230 for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
231 if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
232 cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
233 case $img in
234 *gz)
235 echo "gzip decompressing image"
236 gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
237 break
238 ;;
239 *lz4)
240 echo "lz4 decompressing image"
241 lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
242 break
243 ;;
244 *lzo)
245 echo "lzo decompressing image"
246 lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
247 break
248 ;;
249 *lzma)
250 echo "lzma decompressing image"
251 lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
252 break
253 ;;
254 *xz)
255 echo "xz decompressing image"
256 xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
257 break
258 ;;
259 esac
260 break
261 fi
262 done
263	# Verify that the above loop found an initramfs, fail otherwise
264	[ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio(.gz|.lz4|.lzo|.lzma|.xz) for bundling; INITRAMFS_IMAGE_NAME might be wrong."
265}
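
A plain-Python sketch of the archive selection order used by copy_initramfs; the decompression commands are the ones invoked by the shell loop above, and the paths are illustrative:

    import os

    # Ordered by preference, matching the loop in copy_initramfs.
    ARCHIVES = [
        ("cpio",      None),
        ("cpio.gz",   "gunzip -f"),
        ("cpio.lz4",  "lz4 -df"),
        ("cpio.lzo",  "lzop -df"),
        ("cpio.lzma", "lzma -df"),
        ("cpio.xz",   "xz -df"),
    ]

    def pick_initramfs(deploy_dir, image_name):
        # Return the first archive found and the command needed to unpack it.
        for suffix, cmd in ARCHIVES:
            candidate = os.path.join(deploy_dir, "%s.%s" % (image_name, suffix))
            if os.path.exists(candidate):
                return candidate, cmd
        return None, None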
266
267do_bundle_initramfs () {
268 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
269 echo "Creating a kernel image with a bundled initramfs..."
270 copy_initramfs
271		# Backing up the kernel image relies on its type (regular file or symbolic link)
272 tmp_path=""
273 for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
274 if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then
275 linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType`
276 realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType`
277 mv -f $realpath $realpath.bak
278 tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath
279 elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then
280 mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak
281 tmp_path=$tmp_path" "$imageType"##"
282 fi
283 done
284 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
285 kernel_do_compile
286 # Restoring kernel image
287 for tp in $tmp_path ; do
288 imageType=`echo $tp|cut -d "#" -f 1`
289 linkpath=`echo $tp|cut -d "#" -f 2`
290 realpath=`echo $tp|cut -d "#" -f 3`
291 if [ -n "$realpath" ]; then
292 mv -f $realpath $realpath.initramfs
293 mv -f $realpath.bak $realpath
294 ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs
295 else
296 mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs
297 mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType
298 fi
299 done
300 fi
301}
302do_bundle_initramfs[dirs] = "${B}"
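
The tmp_path bookkeeping above encodes each backed-up image as "type#linkpath#realpath", with the last two fields left empty for regular files; a short sketch of that round trip in plain Python (the example record is illustrative):

    def encode_backup(image_type, linkpath="", realpath=""):
        # Symlinked images carry both the link target and the resolved file;
        # regular files produce "type##".
        return "%s#%s#%s" % (image_type, linkpath, realpath)

    def decode_backup(record):
        image_type, linkpath, realpath = record.split("#")
        return image_type, linkpath, realpath

    # e.g. decode_backup("zImage##") -> ("zImage", "", "")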
303
304python do_devshell_prepend () {
305 os.environ["LDFLAGS"] = ''
306}
307
308addtask bundle_initramfs after do_install before do_deploy
309
310get_cc_option () {
311 # Check if KERNEL_CC supports the option "file-prefix-map".
312 # This option allows us to build images with __FILE__ values that do not
313 # contain the host build path.
314 if ${KERNEL_CC} -Q --help=joined | grep -q "\-ffile-prefix-map=<old=new>"; then
315 echo "-ffile-prefix-map=${S}=/kernel-source/"
316 fi
317}
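
The same probe could be written in Python; a sketch assuming Python 3.7+ and that the compiler command and kernel source path are passed in by the caller:

    import subprocess

    def file_prefix_map_option(kernel_cc, source_dir):
        # Ask the compiler whether it understands -ffile-prefix-map, as the
        # shell helper above does, and return the option that strips the host
        # build path from __FILE__ values.
        help_text = subprocess.run(kernel_cc.split() + ["-Q", "--help=joined"],
                                   capture_output=True, text=True).stdout
        if "-ffile-prefix-map=<old=new>" in help_text:
            return "-ffile-prefix-map=%s=/kernel-source/" % source_dir
        return ""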
318
319kernel_do_compile() {
320 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
321 if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
322 # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
323 # be set....
324 if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
325 # The source directory is not necessarily a git repository, so we
326 # specify the git-dir to ensure that git does not query a
327 # repository in any parent directory.
328 SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
329 fi
330
331 ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
332 export KBUILD_BUILD_TIMESTAMP="$ts"
333 export KCONFIG_NOTIMESTAMP=1
334 bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
335 fi
336 # The $use_alternate_initrd is only set from
337	# do_bundle_initramfs(). This variable is specifically for the
338 # case where we are making a second pass at the kernel
339 # compilation and we want to force the kernel build to use a
340 # different initramfs image. The way to do that in the kernel
341 # is to specify:
342 # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
343 if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
344		# The old style way of copying a prebuilt image and building it
345		# is turned on via INITRAMFS_TASK != ""
346 copy_initramfs
347 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
348 fi
349 cc_extra=$(get_cc_option)
350 for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
351 oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
352 done
353 # vmlinux.gz is not built by kernel
354 if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
355 mkdir -p "${KERNEL_OUTPUT_DIR}"
356 gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
357 fi
358}
359
360do_compile_kernelmodules() {
361 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
362 if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
363 # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
364 # be set....
365 if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
366 # The source directory is not necessarily a git repository, so we
367 # specify the git-dir to ensure that git does not query a
368 # repository in any parent directory.
369 SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
370 fi
371
372 ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
373 export KBUILD_BUILD_TIMESTAMP="$ts"
374 export KCONFIG_NOTIMESTAMP=1
375 bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
376 fi
377 if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
378 cc_extra=$(get_cc_option)
379 oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
380
381 # Module.symvers gets updated during the
382 # building of the kernel modules. We need to
383 # update this in the shared workdir since some
384		# external kernel modules have a dependency on
385 # other kernel modules and will look at this
386 # file to do symbol lookups
387 cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
388 # 5.10+ kernels have module.lds that we need to copy for external module builds
389 if [ -e "${B}/scripts/module.lds" ]; then
390 install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds
391 fi
392 else
393 bbnote "no modules to compile"
394 fi
395}
396addtask compile_kernelmodules after do_compile before do_strip
397
398kernel_do_install() {
399 #
400 # First install the modules
401 #
402 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
403 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
404 oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
405 rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
406 rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
407 # If the kernel/ directory is empty remove it to prevent QA issues
408 rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
409 else
410 bbnote "no modules to install"
411 fi
412
413 #
414 # Install various kernel output (zImage, map file, config, module support files)
415 #
416 install -d ${D}/${KERNEL_IMAGEDEST}
417 install -d ${D}/boot
418
419 #
420 # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
421 # by do_assemble_fitimage_initramfs.
422 # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
423 # So, at the level of the install task we should not try to install the fitImage. fitImage is still not
424 # generated yet.
425 # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
426 # the deploy folder.
427 #
428
429 for imageType in ${KERNEL_IMAGETYPES} ; do
430 if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
431 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
432 fi
433 done
434
435 install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
436 install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
437 install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
438 [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
439 install -d ${D}${sysconfdir}/modules-load.d
440 install -d ${D}${sysconfdir}/modprobe.d
441}
442
443# Must be run no earlier than after do_kernel_checkout or else the Makefile won't be at ${S}/Makefile
444do_kernel_version_sanity_check() {
445 if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then
446 exit 0
447 fi
448
449 # The Makefile determines the kernel version shown at runtime
450 # Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile
451 VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
452 PATCHLEVEL=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
453 SUBLEVEL=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
454 EXTRAVERSION=$(grep "^EXTRAVERSION =" ${S}/Makefile | sed s/.*=\ *//)
455
456 # Build a string for regex and a plain version string
457 reg="^${VERSION}\.${PATCHLEVEL}"
458 vers="${VERSION}.${PATCHLEVEL}"
459 if [ -n "${SUBLEVEL}" ]; then
460 # Ignoring a SUBLEVEL of zero is fine
461 if [ "${SUBLEVEL}" = "0" ]; then
462 reg="${reg}(\.${SUBLEVEL})?"
463 else
464 reg="${reg}\.${SUBLEVEL}"
465 vers="${vers}.${SUBLEVEL}"
466 fi
467 fi
468 vers="${vers}${EXTRAVERSION}"
469 reg="${reg}${EXTRAVERSION}"
470
471 if [ -z `echo ${PV} | grep -E "${reg}"` ]; then
472		bbfatal "Package Version (${PV}) does not match that of the kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe."
473 fi
474 exit 0
475}
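
For clarity, a standalone sketch of the regex the sanity check builds; the version components in the example are illustrative:

    import re

    def version_regex(version, patchlevel, sublevel, extraversion):
        # A SUBLEVEL of "0" is allowed to be absent from PV, matching the
        # shell logic above.
        reg = r"^%s\.%s" % (version, patchlevel)
        if sublevel:
            if sublevel == "0":
                reg += r"(\.%s)?" % sublevel
            else:
                reg += r"\.%s" % sublevel
        return reg + extraversion

    # e.g. re.search(version_regex("5", "15", "0", ""), "5.15") matches,
    # while a PV of "5.10" would trigger the bbfatal above.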
476
477addtask shared_workdir after do_compile before do_compile_kernelmodules
478addtask shared_workdir_setscene
479
480do_shared_workdir_setscene () {
481 exit 1
482}
483
484emit_depmod_pkgdata() {
485 # Stash data for depmod
486 install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/
487 echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion
488 cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION}
489}
490
491PACKAGEFUNCS += "emit_depmod_pkgdata"
492
493do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}"
494do_shared_workdir () {
495 cd ${B}
496
497 kerneldir=${STAGING_KERNEL_BUILDDIR}
498 install -d $kerneldir
499
500 #
501 # Store the kernel version in sysroots for module-base.bbclass
502 #
503
504 echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
505
506 # Copy files required for module builds
507 cp System.map $kerneldir/System.map-${KERNEL_VERSION}
508 [ -e Module.symvers ] && cp Module.symvers $kerneldir/
509 cp .config $kerneldir/
510 mkdir -p $kerneldir/include/config
511 cp include/config/kernel.release $kerneldir/include/config/kernel.release
512 if [ -e certs/signing_key.x509 ]; then
513 # The signing_key.* files are stored in the certs/ dir in
514 # newer Linux kernels
515 mkdir -p $kerneldir/certs
516 cp certs/signing_key.* $kerneldir/certs/
517 elif [ -e signing_key.priv ]; then
518 cp signing_key.* $kerneldir/
519 fi
520
521 # We can also copy over all the generated files and avoid special cases
522 # like version.h, but we've opted to keep this small until file creep starts
523 # to happen
524 if [ -e include/linux/version.h ]; then
525 mkdir -p $kerneldir/include/linux
526 cp include/linux/version.h $kerneldir/include/linux/version.h
527 fi
528
529 # As of Linux kernel version 3.0.1, the clean target removes
530 # arch/powerpc/lib/crtsavres.o which is present in
531 # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
532 if [ ${ARCH} = "powerpc" ]; then
533 if [ -e arch/powerpc/lib/crtsavres.o ]; then
534 mkdir -p $kerneldir/arch/powerpc/lib/
535 cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
536 fi
537 fi
538
539 if [ -d include/generated ]; then
540 mkdir -p $kerneldir/include/generated/
541 cp -fR include/generated/* $kerneldir/include/generated/
542 fi
543
544 if [ -d arch/${ARCH}/include/generated ]; then
545 mkdir -p $kerneldir/arch/${ARCH}/include/generated/
546 cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
547 fi
548
549 if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
550 # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
551 # out-of-tree modules to be able to generate object files.
552 if [ -x tools/objtool/objtool ]; then
553 mkdir -p ${kerneldir}/tools/objtool
554 cp tools/objtool/objtool ${kerneldir}/tools/objtool/
555 fi
556 fi
557}
558
559# We don't need to stage anything; in particular not the modules/firmware, since those would clash with linux-firmware
560sysroot_stage_all () {
561 :
562}
563
564KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
565
566python check_oldest_kernel() {
567 oldest_kernel = d.getVar('OLDEST_KERNEL')
568 kernel_version = d.getVar('KERNEL_VERSION')
569 tclibc = d.getVar('TCLIBC')
570 if tclibc == 'glibc':
571 kernel_version = kernel_version.split('-', 1)[0]
572 if oldest_kernel and kernel_version:
573 if bb.utils.vercmp_string(kernel_version, oldest_kernel) < 0:
574 bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest_kernel, kernel_version, tclibc))
575}
576
577check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
578do_configure[prefuncs] += "check_oldest_kernel"
579
580kernel_do_configure() {
581 # fixes extra + in /lib/modules/2.6.37+
582 # $ scripts/setlocalversion . => +
583 # $ make kernelversion => 2.6.37
584 # $ make kernelrelease => 2.6.37+
585 touch ${B}/.scmversion ${S}/.scmversion
586
587 if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
588 mv "${S}/.config" "${B}/.config"
589 fi
590
591 # Copy defconfig to .config if .config does not exist. This allows
592 # recipes to manage the .config themselves in do_configure_prepend().
593 if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
594 cp "${WORKDIR}/defconfig" "${B}/.config"
595 fi
596
597 ${KERNEL_CONFIG_COMMAND}
598}
599
600do_savedefconfig() {
601 bbplain "Saving defconfig to:\n${B}/defconfig"
602 oe_runmake -C ${B} savedefconfig
603}
604do_savedefconfig[nostamp] = "1"
605addtask savedefconfig after do_configure
606
607inherit cml1
608
609KCONFIG_CONFIG_COMMAND_append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
610
611EXPORT_FUNCTIONS do_compile do_install do_configure
612
613# kernel-base becomes kernel-${KERNEL_VERSION}
614# kernel-image becomes kernel-image-${KERNEL_VERSION}
615PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
616FILES_${PN} = ""
617FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
618FILES_${KERNEL_PACKAGE_NAME}-image = ""
619FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
620FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
621FILES_${KERNEL_PACKAGE_NAME}-modules = ""
622RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
623# Allow machines to override this dependency if kernel image files are
624# not wanted in images as standard
625RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
626PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
627RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
628PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
629RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
630ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
631ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
632ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-image = "1"
633ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-modules = "1"
634DESCRIPTION_${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
635
636pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
637 if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
638 mkdir -p $D/lib/modules/${KERNEL_VERSION}
639 fi
640 if [ -n "$D" ]; then
641 depmodwrapper -a -b $D ${KERNEL_VERSION}
642 else
643 depmod -a ${KERNEL_VERSION}
644 fi
645}
646
647PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
648
649python split_kernel_packages () {
650 do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
651}
652
653# Many scripts want to look in arch/$arch/boot for the bootable
654# image. This poses a problem for vmlinux- and vmlinuz-based
655# booting. This task arranges to have vmlinux and vmlinuz appear
656# in the normalized directory location.
657do_kernel_link_images() {
658 if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
659 mkdir ${B}/arch/${ARCH}/boot
660 fi
661 cd ${B}/arch/${ARCH}/boot
662 ln -sf ../../../vmlinux
663 if [ -f ../../../vmlinuz ]; then
664 ln -sf ../../../vmlinuz
665 fi
666 if [ -f ../../../vmlinuz.bin ]; then
667 ln -sf ../../../vmlinuz.bin
668 fi
669 if [ -f ../../../vmlinux.64 ]; then
670 ln -sf ../../../vmlinux.64
671 fi
672}
673addtask kernel_link_images after do_compile before do_strip
674
675do_strip() {
676 if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
677 if ! (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux"); then
678 bbwarn "image type(s) will not be stripped (not supported): ${KERNEL_IMAGETYPES}"
679 return
680 fi
681
682 cd ${B}
683 headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT_DIR}/vmlinux | \
684 grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
685 sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
686 gawk '{print $1}'`
687
688 for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
689 if ! (echo "$headers" | grep -q "^$str$"); then
690 bbwarn "Section not found: $str";
691 fi
692
693 "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT_DIR}/vmlinux
694 }; done
695
696 bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
697 "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
698 fi;
699}
700do_strip[dirs] = "${B}"
701
702addtask strip before do_sizecheck after do_kernel_link_images
703
704# Support checking the kernel size since some kernels need to reside in partitions
705# with a fixed length or there is a limit in transferring the kernel to memory.
706# If more than one image type is enabled, warn on any that don't fit but only fail
707# if none fit.
708do_sizecheck() {
709 if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
710 invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
711 if [ -n "$invalid" ]; then
712 die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
713 fi
714 at_least_one_fits=
715 for imageType in ${KERNEL_IMAGETYPES} ; do
716 size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
717 if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
718 bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
719 else
720 at_least_one_fits=y
721 fi
722 done
723 if [ -z "$at_least_one_fits" ]; then
724 die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular."
725 fi
726 fi
727}
728do_sizecheck[dirs] = "${B}"
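
A sketch of the warn-versus-fail policy implemented above, with sizes in kilobytes as reported by du -ks (the example values are illustrative):

    def check_sizes(sizes_kb, max_kb):
        # Warn about every image that exceeds the limit, but only fail the
        # build when none of the enabled image types fit.
        too_big = {t: s for t, s in sizes_kb.items() if s >= max_kb}
        if len(too_big) == len(sizes_kb):
            raise RuntimeError("All kernel images are too big for your device.")
        return too_big  # image types that only warrant a warning

    # e.g. check_sizes({"zImage": 9000, "fitImage": 12000}, 10240)
    #   -> {"fitImage": 12000}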
729
730addtask sizecheck before do_install after do_strip
731
732inherit kernel-artifact-names
733
734kernel_do_deploy() {
735 deployDir="${DEPLOYDIR}"
736 if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
737 deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
738 mkdir "$deployDir"
739 fi
740
741 for imageType in ${KERNEL_IMAGETYPES} ; do
742 baseName=$imageType-${KERNEL_IMAGE_NAME}
743 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName.bin
744 ln -sf $baseName.bin $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}.bin
745 ln -sf $baseName.bin $deployDir/$imageType
746 done
747
748 if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
749 mkdir -p ${D}${root_prefix}/lib
750 if [ -n "${SOURCE_DATE_EPOCH}" ]; then
751 TAR_ARGS="--sort=name --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}"
752 else
753 TAR_ARGS=""
754 fi
755 TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
756 tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz
757
758 ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
759 fi
760
761 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
762 for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
763 if [ "$imageType" = "fitImage" ] ; then
764 continue
765 fi
766 initramfsBaseName=$imageType-${INITRAMFS_NAME}
767 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName.bin
768 ln -sf $initramfsBaseName.bin $deployDir/$imageType-${INITRAMFS_LINK_NAME}.bin
769 done
770 fi
771}
772
773# We deploy to filenames that include PKGV and PKGR, read the saved data to
774# ensure we get the right values for both
775do_deploy[prefuncs] += "read_subpackage_metadata"
776
777addtask deploy after do_populate_sysroot do_packagedata
778
779EXPORT_FUNCTIONS do_deploy
780
781# Add Device Tree support
782inherit kernel-devicetree
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
deleted file mode 100644
index a951ba3325..0000000000
--- a/meta/classes/kernelsrc.bbclass
+++ /dev/null
@@ -1,10 +0,0 @@
1S = "${STAGING_KERNEL_DIR}"
2deltask do_fetch
3deltask do_unpack
4do_patch[depends] += "virtual/kernel:do_shared_workdir"
5do_patch[noexec] = "1"
6do_package[depends] += "virtual/kernel:do_populate_sysroot"
7KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
8
9inherit linux-kernel-base
10
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
deleted file mode 100644
index 8849f59042..0000000000
--- a/meta/classes/lib_package.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# ${PN}-bin is defined in bitbake.conf
3#
4# We need to allow the other packages to be greedy with what they
5# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
6#
7PACKAGE_BEFORE_PN = "${PN}-bin"
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
deleted file mode 100644
index de3b4250c7..0000000000
--- a/meta/classes/libc-package.bbclass
+++ /dev/null
@@ -1,384 +0,0 @@
1#
2# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
3# may need packaging and it's pointless to duplicate this code.
4#
5# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
6# "compile" - Use QEMU to generate the binary locale files
7# "precompiled" - The binary locale files are pregenerated and already present
8# "ondevice" - The device will build the locale files upon first boot through the postinst
9
10GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
11
12GLIBC_SPLIT_LC_PACKAGES ?= "0"
13
14python __anonymous () {
15 enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION")
16
17 pn = d.getVar("PN")
18 if pn.endswith("-initial"):
19 enabled = False
20
21 if enabled and int(enabled):
22 import re
23
24 target_arch = d.getVar("TARGET_ARCH")
25 binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or ""
26 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or ""
27
28 for regexp in binary_arches.split(" "):
29 r = re.compile(regexp)
30
31 if r.match(target_arch):
32 depends = d.getVar("DEPENDS")
33 if use_cross_localedef == "1" :
34 depends = "%s cross-localedef-native" % depends
35 else:
36 depends = "%s qemu-native" % depends
37 d.setVar("DEPENDS", depends)
38 d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
39 break
40}
41
42# try to fix the compile failure when charsets/locales/locale-code are disabled
43PACKAGE_NO_GCONV ?= "0"
44
45OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
46
47locale_base_postinst_ontarget() {
48localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
49}
50
51locale_base_postrm() {
52#!/bin/sh
53localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s %s
54}
55
56LOCALETREESRC ?= "${PKGD}"
57
58do_prep_locale_tree() {
59 treedir=${WORKDIR}/locale-tree
60 rm -rf $treedir
61 mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
62 tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
63 # unzip to avoid parsing errors
64 for i in $treedir/${datadir}/i18n/charmaps/*gz; do
65 gunzip $i
66 done
67 # The extract pattern "./l*.so*" is carefully selected so that it will
68 # match ld*.so and lib*.so*, but not any files in the gconv directory
69 # (if it exists). This makes sure we only unpack the files we need.
70 # This is important in case usrmerge is set in DISTRO_FEATURES, which
71 # means ${base_libdir} == ${libdir}.
72 tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
73 if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
74 tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
75 fi
76 install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
77}
78
79do_collect_bins_from_locale_tree() {
80 treedir=${WORKDIR}/locale-tree
81
82 parent=$(dirname ${localedir})
83 mkdir -p ${PKGD}/$parent
84 tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
85
86 # Finalize tree by changing all duplicate files into hard links
87 cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
88}
89
90inherit qemu
91
92python package_do_split_gconvs () {
93 import re
94 if (d.getVar('PACKAGE_NO_GCONV') == '1'):
95 bb.note("package requested not splitting gconvs")
96 return
97
98 if not d.getVar('PACKAGES'):
99 return
100
101 mlprefix = d.getVar("MLPREFIX") or ""
102
103 bpn = d.getVar('BPN')
104 libdir = d.getVar('libdir')
105 if not libdir:
106 bb.error("libdir not defined")
107 return
108 datadir = d.getVar('datadir')
109 if not datadir:
110 bb.error("datadir not defined")
111 return
112
113 gconv_libdir = oe.path.join(libdir, "gconv")
114 charmap_dir = oe.path.join(datadir, "i18n", "charmaps")
115 locales_dir = oe.path.join(datadir, "i18n", "locales")
116 binary_locales_dir = d.getVar('localedir')
117
118 def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
119 deps = []
120 f = open(fn, "rb")
121 c_re = re.compile(r'^copy "(.*)"')
122 i_re = re.compile(r'^include "(\w+)".*')
123 for l in f.readlines():
124 l = l.decode("latin-1")
125 m = c_re.match(l) or i_re.match(l)
126 if m:
127 dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
128 if not dp in deps:
129 deps.append(dp)
130 f.close()
131 if deps != []:
132 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
133 if bpn != 'glibc':
134 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
135
136 do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
137 description='gconv module for character set %s', hook=calc_gconv_deps, \
138 extra_depends=bpn+'-gconv')
139
140 def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
141 deps = []
142 f = open(fn, "rb")
143 c_re = re.compile(r'^copy "(.*)"')
144 i_re = re.compile(r'^include "(\w+)".*')
145 for l in f.readlines():
146 l = l.decode("latin-1")
147 m = c_re.match(l) or i_re.match(l)
148 if m:
149 dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
150 if not dp in deps:
151 deps.append(dp)
152 f.close()
153 if deps != []:
154 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
155 if bpn != 'glibc':
156 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
157
158 do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
159 description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
160
161 def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
162 deps = []
163 f = open(fn, "rb")
164 c_re = re.compile(r'^copy "(.*)"')
165 i_re = re.compile(r'^include "(\w+)".*')
166 for l in f.readlines():
167 l = l.decode("latin-1")
168 m = c_re.match(l) or i_re.match(l)
169 if m:
170 dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
171 if not dp in deps:
172 deps.append(dp)
173 f.close()
174 if deps != []:
175 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
176 if bpn != 'glibc':
177 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
178
179 do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
180 description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
181 d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
182
183 use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")
184
185 dot_re = re.compile(r"(.*)\.(.*)")
186
187 # Read in supported locales and associated encodings
188 supported = {}
189 with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
190 for line in f.readlines():
191 try:
192 locale, charset = line.rstrip().split()
193 except ValueError:
194 continue
195 supported[locale] = charset
196
197 # The GLIBC_GENERATE_LOCALES var specifies which locales are to be generated. Empty or "all" means all locales
198 to_generate = d.getVar('GLIBC_GENERATE_LOCALES')
199 if not to_generate or to_generate == 'all':
200 to_generate = sorted(supported.keys())
201 else:
202 to_generate = to_generate.split()
203 for locale in to_generate:
204 if locale not in supported:
205 if '.' in locale:
206 charset = locale.split('.')[1]
207 else:
208 charset = 'UTF-8'
209 bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
210 supported[locale] = charset
211
212 def output_locale_source(name, pkgname, locale, encoding):
213 d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
214 (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
215 d.setVar('pkg_postinst_ontarget_%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
216 % (locale, encoding, locale))
217 d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm') % \
218 (locale, encoding, locale))
219
220 def output_locale_binary_rdepends(name, pkgname, locale, encoding):
221 dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name))
222 lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
223 if lcsplit and int(lcsplit):
224 d.appendVar('PACKAGES', ' ' + dep)
225 d.setVar('ALLOW_EMPTY_%s' % dep, '1')
226 d.setVar('RDEPENDS_%s' % pkgname, mlprefix + dep)
227
228 commands = {}
229
230 def output_locale_binary(name, pkgname, locale, encoding):
231 treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree")
232 ldlibdir = oe.path.join(treedir, d.getVar("base_libdir"))
233 path = d.getVar("PATH")
234 i18npath = oe.path.join(treedir, datadir, "i18n")
235 gconvpath = oe.path.join(treedir, "iconvdata")
236 outputpath = oe.path.join(treedir, binary_locales_dir)
237
238 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
239 if use_cross_localedef == "1":
240 target_arch = d.getVar('TARGET_ARCH')
241 locale_arch_options = { \
242 "arc": " --uint32-align=4 --little-endian ", \
243 "arceb": " --uint32-align=4 --big-endian ", \
244 "arm": " --uint32-align=4 --little-endian ", \
245 "armeb": " --uint32-align=4 --big-endian ", \
246 "aarch64": " --uint32-align=4 --little-endian ", \
247 "aarch64_be": " --uint32-align=4 --big-endian ", \
248 "sh4": " --uint32-align=4 --big-endian ", \
249 "powerpc": " --uint32-align=4 --big-endian ", \
250 "powerpc64": " --uint32-align=4 --big-endian ", \
251 "powerpc64le": " --uint32-align=4 --little-endian ", \
252 "mips": " --uint32-align=4 --big-endian ", \
253 "mipsisa32r6": " --uint32-align=4 --big-endian ", \
254 "mips64": " --uint32-align=4 --big-endian ", \
255 "mipsisa64r6": " --uint32-align=4 --big-endian ", \
256 "mipsel": " --uint32-align=4 --little-endian ", \
257 "mipsisa32r6el": " --uint32-align=4 --little-endian ", \
258 "mips64el":" --uint32-align=4 --little-endian ", \
259 "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
260 "riscv64": " --uint32-align=4 --little-endian ", \
261 "riscv32": " --uint32-align=4 --little-endian ", \
262 "i586": " --uint32-align=4 --little-endian ", \
263 "i686": " --uint32-align=4 --little-endian ", \
264 "x86_64": " --uint32-align=4 --little-endian " }
265
266 if target_arch in locale_arch_options:
267 localedef_opts = locale_arch_options[target_arch]
268 else:
269 bb.error("locale_arch_options not found for target_arch=" + target_arch)
270 bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
271
272 localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
273 --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
274 % (treedir, treedir, datadir, locale, encoding, outputpath, name)
275
276 cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
277 (path, i18npath, gconvpath, localedef_opts)
278 else: # earlier slower qemu way
279 qemu = qemu_target_binary(d)
280 localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
281 --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
282 % (treedir, datadir, locale, encoding, name)
283
284 qemu_options = d.getVar('QEMU_OPTIONS')
285
286 cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
287 -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
288 (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
289
290 commands["%s/%s" % (outputpath, name)] = cmd
291
292 bb.note("generating locale %s (%s)" % (locale, encoding))
293
294 def output_locale(name, locale, encoding):
295 pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
296 d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
297 d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
298 rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
299 m = re.match(r"(.*)_(.*)", name)
300 if m:
301 rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
302 d.setVar('RPROVIDES_%s' % pkgname, rprovides)
303
304 if use_bin == "compile":
305 output_locale_binary_rdepends(name, pkgname, locale, encoding)
306 output_locale_binary(name, pkgname, locale, encoding)
307 elif use_bin == "precompiled":
308 output_locale_binary_rdepends(name, pkgname, locale, encoding)
309 else:
310 output_locale_source(name, pkgname, locale, encoding)
311
312 if use_bin == "compile":
313 bb.note("preparing tree for binary locale generation")
314 bb.build.exec_func("do_prep_locale_tree", d)
315
316 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0)
317 utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0)
318
319 encodings = {}
320 for locale in to_generate:
321 charset = supported[locale]
322 if utf8_only and charset != 'UTF-8':
323 continue
324
325 m = dot_re.match(locale)
326 if m:
327 base = m.group(1)
328 else:
329 base = locale
330
331 # Non-precompiled locales may be renamed so that the default
332 # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and
333 # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly
334 # contradicts SUPPORTED.
335 if use_bin == "precompiled" or not utf8_is_default:
336 output_locale(locale, base, charset)
337 else:
338 if charset == 'UTF-8':
339 output_locale(base, base, charset)
340 else:
341 output_locale('%s.%s' % (base, charset), base, charset)
342
343 def metapkg_hook(file, pkg, pattern, format, basename):
344 name = basename.split('/', 1)[0]
345 metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
346 d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
347
348 if use_bin == "compile":
349 makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
350 with open(makefile, "w") as m:
351 m.write("all: %s\n\n" % " ".join(commands.keys()))
352 total = len(commands)
353 for i, (maketarget, makerecipe) in enumerate(commands.items()):
354 m.write(maketarget + ":\n")
355 m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
356 m.write("\t" + makerecipe + "\n\n")
357 d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
358 d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
359 bb.note("Executing binary locale generation makefile")
360 bb.build.exec_func("oe_runmake", d)
361 bb.note("collecting binary locales from locale tree")
362 bb.build.exec_func("do_collect_bins_from_locale_tree", d)
363
364 if use_bin in ('compile', 'precompiled'):
365 lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
366 if lcsplit and int(lcsplit):
367 do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \
368 output_pattern=bpn+'-binary-localedata-%s', \
369 description='binary locale definition for %s', recursive=True,
370 hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
371 else:
372 do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \
373 output_pattern=bpn+'-binary-localedata-%s', \
374 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
375 else:
376 bb.note("generation of binary locales disabled. this may break i18n!")
377
378}
379
380# We want to do this indirection so that we can safely 'return'
381# from the called function even though we're prepending
382python populate_packages_prepend () {
383 bb.build.exec_func('package_do_split_gconvs', d)
384}
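calc_gconv_deps(), calc_charmap_deps() and calc_locale_deps() above all share one idea: scan a gconv, charmap or locale source file for copy/include directives and turn each referenced name into a runtime dependency on the matching split package. A standalone sketch of that scan; the helper name, prefix argument and example are illustrative, and it only approximates legitimize_package_name() by lowercasing and replacing '_' with '-':

    import re

    def referenced_packages(path, prefix):
        """Collect '<prefix>-<name>' deps from copy/include directives in a locale or charmap file."""
        copy_re = re.compile(r'^copy "(.*)"')
        include_re = re.compile(r'^include "(\w+)".*')
        deps = []
        with open(path, "rb") as f:
            for raw in f:
                line = raw.decode("latin-1")
                m = copy_re.match(line) or include_re.match(line)
                if m:
                    dep = "%s-%s" % (prefix, m.group(1).lower().replace("_", "-"))
                    if dep not in deps:
                        deps.append(dep)
        return deps

    # A locale source containing the line: copy "en_US"
    # would yield ['glibc-localedata-en-us'] with prefix="glibc-localedata".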
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
deleted file mode 100644
index bcea0b3cb5..0000000000
--- a/meta/classes/license.bbclass
+++ /dev/null
@@ -1,436 +0,0 @@
1# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
2# LIC_FILES_CHKSUM.
3# TODO:
4# - There is a real issue revolving around license naming standards.
5
6LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
7LICSSTATEDIR = "${WORKDIR}/license-destdir/"
8
9# Create extra package with license texts and add it to RRECOMMENDS_${PN}
10LICENSE_CREATE_PACKAGE[type] = "boolean"
11LICENSE_CREATE_PACKAGE ??= "0"
12LICENSE_PACKAGE_SUFFIX ??= "-lic"
13LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
14
15addtask populate_lic after do_patch before do_build
16do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
17do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
18
19python do_populate_lic() {
20 """
21 Populate LICENSE_DIRECTORY with licenses.
22 """
23 lic_files_paths = find_license_files(d)
24
25 # The base directory we wrangle licenses to
26 destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('PN'))
27 copy_license_files(lic_files_paths, destdir)
28 info = get_recipe_info(d)
29 with open(os.path.join(destdir, "recipeinfo"), "w") as f:
30 for key in sorted(info.keys()):
31 f.write("%s: %s\n" % (key, info[key]))
32}
33
34PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '')).split())}"
35# it would be better to copy them in do_install_append, but find_license_files is python
36python perform_packagecopy_prepend () {
37 enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
38 if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
39 lic_files_paths = find_license_files(d)
40
41 # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
42 destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
43 copy_license_files(lic_files_paths, destdir)
44 add_package_and_files(d)
45}
46perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
47
48def get_recipe_info(d):
49 info = {}
50 info["PV"] = d.getVar("PV")
51 info["PR"] = d.getVar("PR")
52 info["LICENSE"] = d.getVar("LICENSE")
53 return info
54
55def add_package_and_files(d):
56 packages = d.getVar('PACKAGES')
57 files = d.getVar('LICENSE_FILES_DIRECTORY')
58 pn = d.getVar('PN')
59 pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
60 if pn_lic in packages.split():
61 bb.warn("%s package already existed in %s." % (pn_lic, pn))
62 else:
63 # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
64 d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
65 d.setVar('FILES_' + pn_lic, files)
66 for pn in packages.split():
67 if pn == pn_lic:
68 continue
69 rrecommends_pn = d.getVar('RRECOMMENDS_' + pn)
70 if rrecommends_pn:
71 d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
72 else:
73 d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
74
75def copy_license_files(lic_files_paths, destdir):
76 import shutil
77 import errno
78
79 bb.utils.mkdirhier(destdir)
80 for (basename, path, beginline, endline) in lic_files_paths:
81 try:
82 src = path
83 dst = os.path.join(destdir, basename)
84 if os.path.exists(dst):
85 os.remove(dst)
86 if os.path.islink(src):
87 src = os.path.realpath(src)
88 canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
89 if canlink:
90 try:
91 os.link(src, dst)
92 except OSError as err:
93 if err.errno == errno.EXDEV:
94 # Copy license files if hard-link is not possible even if st_dev is the
95 # same on source and destination (docker container with device-mapper?)
96 canlink = False
97 else:
98 raise
99 # Only chown if we did hardlink, and we're running under pseudo
100 if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
101 os.chown(dst,0,0)
102 if not canlink:
103 begin_idx = int(beginline)-1 if beginline is not None else None
104 end_idx = int(endline) if endline is not None else None
105 if begin_idx is None and end_idx is None:
106 shutil.copyfile(src, dst)
107 else:
108 with open(src, 'rb') as src_f:
109 with open(dst, 'wb') as dst_f:
110 dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
111
112 except Exception as e:
113 bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
114
115def find_license_files(d):
116 """
117 Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
118 """
119 import shutil
120 import oe.license
121 from collections import defaultdict, OrderedDict
122
123 # All the license files for the package
124 lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
125 pn = d.getVar('PN')
126 # The license files are located in S at the paths given by LIC_FILES_CHKSUM.
127 srcdir = d.getVar('S')
128 # Directory we store the generic licenses as set in the distro configuration
129 generic_directory = d.getVar('COMMON_LICENSE_DIR')
130 # List of basename, path tuples
131 lic_files_paths = []
132 # dict to keep track of generic license mappings
133 non_generic_lics = {}
134 # Entries from LIC_FILES_CHKSUM
135 lic_chksums = {}
136 license_source_dirs = []
137 license_source_dirs.append(generic_directory)
138 try:
139 additional_lic_dirs = d.getVar('LICENSE_PATH').split()
140 for lic_dir in additional_lic_dirs:
141 license_source_dirs.append(lic_dir)
142 except:
143 pass
144
145 class FindVisitor(oe.license.LicenseVisitor):
146 def visit_Str(self, node):
147 #
148 # Until I figure out what to do with
149 # the two modifiers I support (or-later = "+"
150 # and "with exceptions" = "*"),
151 # we'll just strip out the modifier and use
152 # the base license.
153 find_license(node.s.replace("+", "").replace("*", ""))
154 self.generic_visit(node)
155
156 def find_license(license_type):
157 try:
158 bb.utils.mkdirhier(gen_lic_dest)
159 except:
160 pass
161 spdx_generic = None
162 license_source = None
163 # If the generic does not exist we need to check to see if there is an SPDX mapping to it,
164 # unless NO_GENERIC_LICENSE is set.
165 for lic_dir in license_source_dirs:
166 if not os.path.isfile(os.path.join(lic_dir, license_type)):
167 if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
168 # Great, there is an SPDXLICENSEMAP. We can copy!
169 bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
170 spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
171 license_source = lic_dir
172 break
173 elif os.path.isfile(os.path.join(lic_dir, license_type)):
174 spdx_generic = license_type
175 license_source = lic_dir
176 break
177
178 non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
179 if spdx_generic and license_source:
180 # we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest
181 # audit up. This should be fixed in emit_pkgdata (or, we actually go and fix all the recipes)
182
183 lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
184 None, None))
185
186 # The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
187 # and should not be allowed; warn the user in this case.
188 if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
189 bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
190
191 elif non_generic_lic and non_generic_lic in lic_chksums:
192 # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
193 # of the package rather than the license_source_dirs.
194 lic_files_paths.append(("generic_" + license_type,
195 os.path.join(srcdir, non_generic_lic), None, None))
196 non_generic_lics[non_generic_lic] = license_type
197 else:
198 # Explicitly avoid the CLOSED license because it isn't generic
199 if license_type != 'CLOSED':
200 # And here is where we warn people that their licenses are lousy
201 bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
202 pass
203
204 if not generic_directory:
205 bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
206
207 for url in lic_files.split():
208 try:
209 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
210 if method != "file" or not path:
211 raise bb.fetch.MalformedUrl()
212 except bb.fetch.MalformedUrl:
213 bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
214 # We want the license filename and path
215 chksum = parm.get('md5', None)
216 beginline = parm.get('beginline')
217 endline = parm.get('endline')
218 lic_chksums[path] = (chksum, beginline, endline)
219
220 v = FindVisitor()
221 try:
222 v.visit_string(d.getVar('LICENSE'))
223 except oe.license.InvalidLicense as exc:
224 bb.fatal('%s: %s' % (d.getVar('PF'), exc))
225 except SyntaxError:
226 bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF')))
227 # Add files from LIC_FILES_CHKSUM to list of license files
228 lic_chksum_paths = defaultdict(OrderedDict)
229 for path, data in sorted(lic_chksums.items()):
230 lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
231 for basename, files in lic_chksum_paths.items():
232 if len(files) == 1:
233 # Don't copy a LICENSE again if it was already handled as non-generic
234 if basename in non_generic_lics:
235 continue
236 data = list(files.values())[0]
237 lic_files_paths.append(tuple([basename] + list(data)))
238 else:
239 # If there are multiple different license files with identical
240 # basenames we rename them to <file>.0, <file>.1, ...
241 for i, data in enumerate(files.values()):
242 lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
243
244 return lic_files_paths
245
246def return_spdx(d, license):
247 """
248 This function returns the spdx mapping of a license if it exists.
249 """
250 return d.getVarFlag('SPDXLICENSEMAP', license)
251
252def canonical_license(d, license):
253 """
254 Return the canonical (SPDX) form of the license if available (so GPLv3
255 becomes GPL-3.0) or the passed license if there is no canonical form.
256 """
257 return d.getVarFlag('SPDXLICENSEMAP', license) or license
258
259def available_licenses(d):
260 """
261 Return the available licenses by searching the directories specified by
262 COMMON_LICENSE_DIR and LICENSE_PATH.
263 """
264 lic_dirs = ((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' +
265 (d.getVar('LICENSE_PATH') or '')).split()
266
267 licenses = []
268 for lic_dir in lic_dirs:
269 licenses += os.listdir(lic_dir)
270
271 licenses = sorted(licenses)
272 return licenses
273
274# Only determine the list of all available licenses once. This assumes that any
275# additions to LICENSE_PATH have been done before this file is parsed.
276AVAILABLE_LICENSES := "${@' '.join(available_licenses(d))}"
277
278def expand_wildcard_licenses(d, wildcard_licenses):
279 """
280 Return actual spdx format license names if wildcards are used. We expand
281 wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
282 """
283 import fnmatch
284
285 # Assume if we're passed "GPLv3" or "*GPLv3" it means -or-later as well
286 for lic in wildcard_licenses[:]:
287 if not lic.endswith(("-or-later", "-only", "*")):
288 wildcard_licenses.append(lic + "+")
289
290 licenses = wildcard_licenses[:]
291 spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
292 for wld_lic in wildcard_licenses:
293 spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
294 licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
295
296 spdx_lics = d.getVar('AVAILABLE_LICENSES').split()
297 for wld_lic in wildcard_licenses:
298 licenses += fnmatch.filter(spdx_lics, wld_lic)
299
300 licenses = list(set(licenses))
301 return licenses
302
303def incompatible_license_contains(license, truevalue, falsevalue, d):
304 license = canonical_license(d, license)
305 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
306 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
307 return truevalue if license in bad_licenses else falsevalue
308
309def incompatible_pkg_license(d, dont_want_licenses, license):
310 # Handles an "or" or two license sets provided by
311 # flattened_licenses(), pick one that works if possible.
312 def choose_lic_set(a, b):
313 return a if all(oe.license.license_ok(canonical_license(d, lic),
314 dont_want_licenses) for lic in a) else b
315
316 try:
317 licenses = oe.license.flattened_licenses(license, choose_lic_set)
318 except oe.license.LicenseError as exc:
319 bb.fatal('%s: %s' % (d.getVar('P'), exc))
320
321 incompatible_lic = []
322 for l in licenses:
323 license = canonical_license(d, l)
324 if not oe.license.license_ok(license, dont_want_licenses):
325 incompatible_lic.append(license)
326
327 return sorted(incompatible_lic)
328
329def incompatible_license(d, dont_want_licenses, package=None):
330 """
331 This function checks if a recipe has only incompatible licenses. It also
332 takes the 'or' operator into consideration. dont_want_licenses should be passed
333 as canonical (SPDX) names.
334 """
335 import oe.license
336 license = d.getVar("LICENSE_%s" % package) if package else None
337 if not license:
338 license = d.getVar('LICENSE')
339
340 return incompatible_pkg_license(d, dont_want_licenses, license)
341
342def check_license_flags(d):
343 """
344 This function checks if a recipe has any LICENSE_FLAGS that
345 aren't whitelisted.
346
347 If it does, it returns all of the LICENSE_FLAGS missing from the whitelist, or
348 all of the LICENSE_FLAGS if there is no whitelist.
349
350 If everything is properly whitelisted, it returns None.
351 """
352
353 def license_flag_matches(flag, whitelist, pn):
354 """
355 Return True if flag matches something in whitelist, False if not.
356
357 Before we test a flag against the whitelist, we append _${PN}
358 to it. We then try to match that string against the
359 whitelist. This covers the normal case, where we expect
360 LICENSE_FLAGS to be a simple string like 'commercial', which
361 the user typically matches exactly in the whitelist by
362 explicitly appending the package name e.g 'commercial_foo'.
363 If we fail the match however, we then split the flag across
364 '_' and append each fragment and test until we either match or
365 run out of fragments.
366 """
367 flag_pn = ("%s_%s" % (flag, pn))
368 for candidate in whitelist:
369 if flag_pn == candidate:
370 return True
371
372 flag_cur = ""
373 flagments = flag_pn.split("_")
374 flagments.pop() # we've already tested the full string
375 for flagment in flagments:
376 if flag_cur:
377 flag_cur += "_"
378 flag_cur += flagment
379 for candidate in whitelist:
380 if flag_cur == candidate:
381 return True
382 return False
383
384 def all_license_flags_match(license_flags, whitelist):
385 """ Return all unmatched flags, None if all flags match """
386 pn = d.getVar('PN')
387 split_whitelist = whitelist.split()
388 flags = []
389 for flag in license_flags.split():
390 if not license_flag_matches(flag, split_whitelist, pn):
391 flags.append(flag)
392 return flags if flags else None
393
394 license_flags = d.getVar('LICENSE_FLAGS')
395 if license_flags:
396 whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
397 if not whitelist:
398 return license_flags.split()
399 unmatched_flags = all_license_flags_match(license_flags, whitelist)
400 if unmatched_flags:
401 return unmatched_flags
402 return None
403
404def check_license_format(d):
405 """
406 This function checks that LICENSE is well formed and
407 validates the operators between license names:
408 license names must be separated by valid operator characters, not just spaces.
409 """
410 pn = d.getVar('PN')
411 licenses = d.getVar('LICENSE')
412 from oe.license import license_operator, license_operator_chars, license_pattern
413
414 elements = list(filter(lambda x: x.strip(), license_operator.split(licenses)))
415 for pos, element in enumerate(elements):
416 if license_pattern.match(element):
417 if pos > 0 and license_pattern.match(elements[pos - 1]):
418 bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
419 'must be separated by the following characters to indicate ' \
420 'the license selection: %s' %
421 (pn, licenses, license_operator_chars))
422 elif not license_operator.match(element):
423 bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
424 'in the valid list of separators (%s)' %
425 (pn, licenses, element, license_operator_chars))
426
427SSTATETASKS += "do_populate_lic"
428do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
429do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
430
431IMAGE_CLASSES_append = " license_image"
432
433python do_populate_lic_setscene () {
434 sstate_setscene(d)
435}
436addtask do_populate_lic_setscene
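expand_wildcard_licenses() above widens the INCOMPATIBLE_LICENSE list: bare names gain an '-or-later' ('+') variant, and each pattern is fnmatch'ed against both the SPDXLICENSEMAP keys and AVAILABLE_LICENSES. A standalone sketch of that expansion over plain dict/list inputs; the example mapping and license list are illustrative, not the distro's real data:

    import fnmatch

    def expand_wildcard_licenses(wildcards, spdx_map, available):
        """Expand wildcard/legacy license names into concrete SPDX names."""
        wildcards = list(wildcards)
        # Treat "GPLv3" as also meaning the -or-later form, as the class does.
        for lic in wildcards[:]:
            if not lic.endswith(("-or-later", "-only", "*")):
                wildcards.append(lic + "+")

        licenses = wildcards[:]
        for pattern in wildcards:
            # Legacy names that map to SPDX identifiers
            licenses += [spdx_map[k] for k in fnmatch.filter(spdx_map.keys(), pattern)]
            # Names of license texts found under COMMON_LICENSE_DIR / LICENSE_PATH
            licenses += fnmatch.filter(available, pattern)
        return sorted(set(licenses))

    # expand_wildcard_licenses(["GPL-3.0*"],
    #                          {"GPLv3": "GPL-3.0-only"},
    #                          ["GPL-3.0-only", "GPL-3.0-or-later", "MIT"])
    # -> ['GPL-3.0*', 'GPL-3.0-only', 'GPL-3.0-or-later']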
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
deleted file mode 100644
index c96b032ebd..0000000000
--- a/meta/classes/license_image.bbclass
+++ /dev/null
@@ -1,269 +0,0 @@
1python write_package_manifest() {
2 # Get list of installed packages
3 license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
4 bb.utils.mkdirhier(license_image_dir)
5 from oe.rootfs import image_list_installed_packages
6 from oe.utils import format_pkg_list
7
8 pkgs = image_list_installed_packages(d)
9 output = format_pkg_list(pkgs)
10 open(os.path.join(license_image_dir, 'package.manifest'),
11 'w+').write(output)
12}
13
14python license_create_manifest() {
15 import oe.packagedata
16 from oe.rootfs import image_list_installed_packages
17
18 build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
19 if build_images_from_feeds == "1":
20 return 0
21
22 pkg_dic = {}
23 for pkg in sorted(image_list_installed_packages(d)):
24 pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
25 'runtime-reverse', pkg)
26 pkg_name = os.path.basename(os.readlink(pkg_info))
27
28 pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
29 if not "LICENSE" in pkg_dic[pkg_name].keys():
30 pkg_lic_name = "LICENSE_" + pkg_name
31 pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
32
33 rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
34 d.getVar('IMAGE_NAME'), 'license.manifest')
35 write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
36}
37
38def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
39 import re
40 import stat
41
42 bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
43 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
44
45 whitelist = []
46 for lic in bad_licenses:
47 whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
48
49 with open(license_manifest, "w") as license_file:
50 for pkg in sorted(pkg_dic):
51 if bad_licenses and pkg not in whitelist:
52 try:
53 licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
54 if licenses:
55 bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(licenses)))
56 (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
57 oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
58 bad_licenses, canonical_license, d)
59 except oe.license.LicenseError as exc:
60 bb.fatal('%s: %s' % (d.getVar('P'), exc))
61 else:
62 pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
63 pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
64 pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
65 if pkg in whitelist:
66 bb.warn("Including %s with an incompatible license %s into the image, because it has been whitelisted." %(pkg, pkg_dic[pkg]["LICENSE"]))
67
68 if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
69 # Rootfs manifest
70 license_file.write("PACKAGE NAME: %s\n" % pkg)
71 license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
72 license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
73 license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
74
75 # If the package doesn't contain any file, that is, its size is 0, the license
76 # isn't relevant as far as the final image is concerned, so doing a license
77 # check doesn't make much sense; skip it.
78 if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
79 continue
80 else:
81 # Image manifest
82 license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
83 license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
84 license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
85 license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
86
87 for lic in pkg_dic[pkg]["LICENSES"]:
88 lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
89 pkg_dic[pkg]["PN"], "generic_%s" %
90 re.sub(r'\+', '', lic))
91 # explicitly avoid the CLOSED license because it isn't generic
92 if lic == "CLOSED":
93 continue
94
95 if not os.path.exists(lic_file):
96 bb.warn("The license listed %s was not in the "\
97 "licenses collected for recipe %s"
98 % (lic, pkg_dic[pkg]["PN"]))
99
100 # Two options here:
101 # - Just copy the manifest
102 # - Copy the manifest and the license directories
103 # With both options set we see a 0.5 MB increase in core-image-minimal
104 copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
105 copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
106 if rootfs and copy_lic_manifest == "1":
107 rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
108 'usr', 'share', 'common-licenses')
109 bb.utils.mkdirhier(rootfs_license_dir)
110 rootfs_license_manifest = os.path.join(rootfs_license_dir,
111 os.path.split(license_manifest)[1])
112 if not os.path.exists(rootfs_license_manifest):
113 oe.path.copyhardlink(license_manifest, rootfs_license_manifest)
114
115 if copy_lic_dirs == "1":
116 for pkg in sorted(pkg_dic):
117 pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
118 bb.utils.mkdirhier(pkg_rootfs_license_dir)
119 pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
120 pkg_dic[pkg]["PN"])
121
122 pkg_manifest_licenses = [canonical_license(d, lic) \
123 for lic in pkg_dic[pkg]["LICENSES"]]
124
125 licenses = os.listdir(pkg_license_dir)
126 for lic in licenses:
127 pkg_license = os.path.join(pkg_license_dir, lic)
128 pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
129
130 if re.match(r"^generic_.*$", lic):
131 generic_lic = canonical_license(d,
132 re.search(r"^generic_(.*)$", lic).group(1))
133
134 # Do not copy the generic license into the package if it isn't
135 # declared in the package's LICENSES.
136 if not re.sub(r'\+$', '', generic_lic) in \
137 [re.sub(r'\+', '', lic) for lic in \
138 pkg_manifest_licenses]:
139 continue
140
141 if oe.license.license_ok(generic_lic,
142 bad_licenses) == False:
143 continue
144
145 # Make sure we use only the canonical name for the license file
146 rootfs_license = os.path.join(rootfs_license_dir, "generic_%s" % generic_lic)
147 if not os.path.exists(rootfs_license):
148 oe.path.copyhardlink(pkg_license, rootfs_license)
149
150 if not os.path.exists(pkg_rootfs_license):
151 os.symlink(os.path.join('..', lic), pkg_rootfs_license)
152 else:
153 if (oe.license.license_ok(canonical_license(d,
154 lic), bad_licenses) == False or
155 os.path.exists(pkg_rootfs_license)):
156 continue
157
158 oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
159 # Fixup file ownership and permissions
160 for walkroot, dirs, files in os.walk(rootfs_license_dir):
161 for f in files:
162 p = os.path.join(walkroot, f)
163 os.lchown(p, 0, 0)
164 if not os.path.islink(p):
165 os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
166 for dir in dirs:
167 p = os.path.join(walkroot, dir)
168 os.lchown(p, 0, 0)
169 os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
170
171
172
173def license_deployed_manifest(d):
174 """
175 Write the license manifest for the deployed recipes.
176 The deployed recipes usually include the bootloader
177 and extra files to boot the target.
178 """
179
180 dep_dic = {}
181 man_dic = {}
182 lic_dir = d.getVar("LICENSE_DIRECTORY")
183
184 dep_dic = get_deployed_dependencies(d)
185 for dep in dep_dic.keys():
186 man_dic[dep] = {}
187 # It is necessary to mark that this will be used for the image manifest
188 man_dic[dep]["IMAGE_MANIFEST"] = True
189 man_dic[dep]["PN"] = dep
190 man_dic[dep]["FILES"] = \
191 " ".join(get_deployed_files(dep_dic[dep]))
192 with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
193 for line in f.readlines():
194 key,val = line.split(": ", 1)
195 man_dic[dep][key] = val[:-1]
196
197 lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
198 d.getVar('IMAGE_NAME'))
199 bb.utils.mkdirhier(lic_manifest_dir)
200 image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
201 write_license_files(d, image_license_manifest, man_dic, rootfs=False)
202
203 link_name = d.getVar('IMAGE_LINK_NAME')
204 if link_name:
205 lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
206 link_name)
207 # remove old symlink
208 if os.path.islink(lic_manifest_symlink_dir):
209 os.unlink(lic_manifest_symlink_dir)
210
211 # create the image dir symlink
212 if lic_manifest_dir != lic_manifest_symlink_dir:
213 os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
214
215def get_deployed_dependencies(d):
216 """
217 Get all the deployed dependencies of an image
218 """
219
220 deploy = {}
221 # Get all the dependencies for the current task (rootfs).
222 taskdata = d.getVar("BB_TASKDEPDATA", False)
223 pn = d.getVar("PN", True)
224 depends = list(set([dep[0] for dep
225 in list(taskdata.values())
226 if not dep[0].endswith("-native") and not dep[0] == pn]))
227
228 # To verify what was deployed, check the rootfs dependencies against
229 # the SSTATE_MANIFESTS for the "deploy" task.
230 # The manifest file name contains the arch. Because we are not running
231 # in the recipe context, it is necessary to check every arch used.
232 sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
233 archs = list(set(d.getVar("SSTATE_ARCHS").split()))
234 for dep in depends:
235 for arch in archs:
236 sstate_manifest_file = os.path.join(sstate_manifest_dir,
237 "manifest-%s-%s.deploy" % (arch, dep))
238 if os.path.exists(sstate_manifest_file):
239 deploy[dep] = sstate_manifest_file
240 break
241
242 return deploy
243get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
244
245def get_deployed_files(man_file):
246 """
247 Get the files deployed from the sstate manifest
248 """
249
250 dep_files = []
251 excluded_files = []
252 with open(man_file, "r") as manifest:
253 all_files = manifest.read()
254 for f in all_files.splitlines():
255 if ((not (os.path.islink(f) or os.path.isdir(f))) and
256 not os.path.basename(f) in excluded_files):
257 dep_files.append(os.path.basename(f))
258 return dep_files
259
260ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
261do_rootfs[recrdeptask] += "do_populate_lic"
262
263python do_populate_lic_deploy() {
264 license_deployed_manifest(d)
265}
266
267addtask populate_lic_deploy before do_build after do_image_complete
268do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
269
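get_deployed_files() above reduces an sstate "deploy" manifest to the artifact basenames that get listed in the image license manifest, skipping symlinks and directories. A standalone sketch over a plain manifest file; the path in the usage comment is hypothetical:

    import os

    def deployed_file_basenames(manifest_path):
        """Return basenames of regular files recorded in an sstate deploy manifest."""
        names = []
        with open(manifest_path, "r") as manifest:
            for path in manifest.read().splitlines():
                if os.path.islink(path) or os.path.isdir(path):
                    continue
                names.append(os.path.basename(path))
        return names

    # e.g. deployed_file_basenames("manifest-qemux86_64-linux-yocto.deploy")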
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
deleted file mode 100644
index ba59222c24..0000000000
--- a/meta/classes/linux-kernel-base.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
1# parse kernel ABI version out of <linux/version.h>
2def get_kernelversion_headers(p):
3 import re
4
5 fn = p + '/include/linux/utsrelease.h'
6 if not os.path.isfile(fn):
7 # after 2.6.33-rc1
8 fn = p + '/include/generated/utsrelease.h'
9 if not os.path.isfile(fn):
10 fn = p + '/include/linux/version.h'
11
12 try:
13 f = open(fn, 'r')
14 except IOError:
15 return None
16
17 l = f.readlines()
18 f.close()
19 r = re.compile("#define UTS_RELEASE \"(.*)\"")
20 for s in l:
21 m = r.match(s)
22 if m:
23 return m.group(1)
24 return None
25
26
27def get_kernelversion_file(p):
28 fn = p + '/kernel-abiversion'
29
30 try:
31 with open(fn, 'r') as f:
32 return f.readlines()[0].strip()
33 except IOError:
34 return None
35
36def linux_module_packages(s, d):
37 suffix = ""
38 return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
39
40# that's all
41
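linux_module_packages() above maps kernel module names onto package names by lowercasing them, replacing '_' with '-' and '@' with '+', and prefixing kernel-module-. A standalone sketch with an illustrative input:

    def linux_module_packages(modules):
        """Map a space-separated module list to kernel-module-* package names."""
        def pkgname(mod):
            return "kernel-module-%s" % mod.lower().replace("_", "-").replace("@", "+")
        return " ".join(pkgname(m) for m in modules.split())

    # linux_module_packages("snd_usb_audio 8021q")
    # -> 'kernel-module-snd-usb-audio kernel-module-8021q'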
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
deleted file mode 100644
index 30925ac87d..0000000000
--- a/meta/classes/linuxloader.bbclass
+++ /dev/null
@@ -1,72 +0,0 @@
1def get_musl_loader_arch(d):
2 import re
3 ldso_arch = "NotSupported"
4
5 targetarch = d.getVar("TARGET_ARCH")
6 if targetarch.startswith("microblaze"):
7 ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}"
8 elif targetarch.startswith("mips"):
9 ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
10 elif targetarch == "powerpc":
11 ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
12 elif targetarch.startswith("powerpc64"):
13 ldso_arch = "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}"
14 elif targetarch == "x86_64":
15 ldso_arch = "x86_64"
16 elif re.search("i.86", targetarch):
17 ldso_arch = "i386"
18 elif targetarch.startswith("arm"):
19 ldso_arch = "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}"
20 elif targetarch.startswith("aarch64"):
21 ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
22 elif targetarch.startswith("riscv64"):
23 ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
24 elif targetarch.startswith("riscv32"):
25 ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
26 return ldso_arch
27
28def get_musl_loader(d):
29 import re
30 return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1"
31
32def get_glibc_loader(d):
33 import re
34
35 dynamic_loader = "NotSupported"
36 targetarch = d.getVar("TARGET_ARCH")
37 if targetarch in ["powerpc", "microblaze"]:
38 dynamic_loader = "${base_libdir}/ld.so.1"
39 elif targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]:
40 dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
41 elif targetarch.startswith("mips"):
42 dynamic_loader = "${base_libdir}/ld.so.1"
43 elif targetarch == "powerpc64":
44 dynamic_loader = "${base_libdir}/ld64.so.1"
45 elif targetarch == "x86_64":
46 dynamic_loader = "${base_libdir}/ld-linux-x86-64.so.2"
47 elif re.search("i.86", targetarch):
48 dynamic_loader = "${base_libdir}/ld-linux.so.2"
49 elif targetarch == "arm":
50 dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3"
51 elif targetarch.startswith("aarch64"):
52 dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
53 elif targetarch.startswith("riscv64"):
54 dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
55 return dynamic_loader
56
57def get_linuxloader(d):
58 overrides = d.getVar("OVERRIDES").split(":")
59
60 if "libc-baremetal" in overrides:
61 return "NotSupported"
62
63 if "libc-musl" in overrides:
64 dynamic_loader = get_musl_loader(d)
65 else:
66 dynamic_loader = get_glibc_loader(d)
67 return dynamic_loader
68
69get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
70get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
71get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}"
72get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
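get_glibc_loader() and get_musl_loader() above select the dynamic loader path purely from TARGET_ARCH, with tune and FPU details folded in through variable expansion. A heavily reduced standalone sketch of the glibc side for a few common architectures, with ${base_libdir} assumed to be /lib and all soft-float and endian suffixes dropped:

    import re

    def glibc_loader(target_arch, base_libdir="/lib"):
        """Heavily reduced mapping of a target architecture to its glibc dynamic loader."""
        if target_arch == "x86_64":
            name = "ld-linux-x86-64.so.2"
        elif re.search("i.86", target_arch):
            name = "ld-linux.so.2"
        elif target_arch.startswith("aarch64"):
            name = "ld-linux-aarch64.so.1"
        elif target_arch in ("powerpc", "microblaze"):
            name = "ld.so.1"
        else:
            return "NotSupported"
        return "%s/%s" % (base_libdir, name)

    # glibc_loader("x86_64") -> '/lib/ld-linux-x86-64.so.2'
    # glibc_loader("i686")   -> '/lib/ld-linux.so.2'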
diff --git a/meta/classes/live-vm-common.bbclass b/meta/classes/live-vm-common.bbclass
deleted file mode 100644
index 74e7074a53..0000000000
--- a/meta/classes/live-vm-common.bbclass
+++ /dev/null
@@ -1,94 +0,0 @@
1# Some of the vars for vm and live images conflict; this function
2# is used to fix the problem.
3def set_live_vm_vars(d, suffix):
4 vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
5 for var in vars:
6 var_with_suffix = var + '_' + suffix
7 if d.getVar(var):
8 bb.warn('Found potential conflicted var %s, please use %s rather than %s' % \
9 (var, var_with_suffix, var))
10 elif d.getVar(var_with_suffix):
11 d.setVar(var, d.getVar(var_with_suffix))
12
13
14EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
15EFI_PROVIDER ?= "grub-efi"
16EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
17
18MKDOSFS_EXTRAOPTS ??= "-S 512"
19
20# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
21# contain "efi". This way legacy is supported by default if neither is
22# specified, maintaining the original behavior.
23def pcbios(d):
24 pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
25 if pcbios == "0":
26 pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
27 return pcbios
28
29PCBIOS = "${@pcbios(d)}"
30PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
31
32# efi_populate_common DEST BOOTLOADER
33efi_populate_common() {
34 # DEST must be the root of the image so that EFIDIR is not
35 # nested under a top level directory.
36 DEST=$1
37
38 install -d ${DEST}${EFIDIR}
39
40 install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
41 EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
42 printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
43}
44
45efi_iso_populate() {
46 iso_dir=$1
47 efi_populate $iso_dir
48 # Build an EFI directory to create efi.img
49 mkdir -p ${EFIIMGDIR}/${EFIDIR}
50 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
51 cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
52
53 EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
54 printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
55
56 if [ -f "$iso_dir/initrd" ] ; then
57 cp $iso_dir/initrd ${EFIIMGDIR}
58 fi
59}
60
61efi_hddimg_populate() {
62 efi_populate $1
63}
64
65inherit ${EFI_CLASS}
66inherit ${PCBIOS_CLASS}
67
68populate_kernel() {
69 dest=$1
70 install -d $dest
71
72 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
73 bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}"
74 if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
75 install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE}
76 else
77 bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist"
78 fi
79
80 # initrd is made by concatenating multiple filesystem images
81 if [ -n "${INITRD}" ]; then
82 rm -f $dest/initrd
83 for fs in ${INITRD}
84 do
85 if [ -s "$fs" ]; then
86 cat $fs >> $dest/initrd
87 else
88 bbfatal "$fs is invalid. initrd image creation failed."
89 fi
90 done
91 chmod 0644 $dest/initrd
92 fi
93}
94
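pcbios() above enables legacy BIOS support either when MACHINE_FEATURES contains "pcbios" or when it does not contain "efi", so a machine that names neither feature still gets a bootable legacy image. A standalone sketch of that default over a plain feature string:

    def want_pcbios(machine_features):
        """Legacy boot is on if 'pcbios' is requested, or if 'efi' is absent."""
        features = machine_features.split()
        if "pcbios" in features:
            return True
        return "efi" not in features

    # want_pcbios("efi")         -> False
    # want_pcbios("efi pcbios")  -> True
    # want_pcbios("")            -> True   (neither set: keep legacy behaviour)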
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
deleted file mode 100644
index a0c94e98c7..0000000000
--- a/meta/classes/logging.bbclass
+++ /dev/null
@@ -1,101 +0,0 @@
1# The following logging mechanisms are to be used in bash functions of recipes.
2# They are intended to map one to one in intention and output format with the
3# python recipe logging functions of a similar naming convention: bb.plain(),
4# bb.note(), etc.
5
6LOGFIFO = "${T}/fifo.${@os.getpid()}"
7
8# Print the output exactly as it is passed in. Typically used for output of
9# tasks that should be seen on the console. Use sparingly.
10# Output: logs console
11bbplain() {
12 if [ -p ${LOGFIFO} ] ; then
13 printf "%b\0" "bbplain $*" > ${LOGFIFO}
14 else
15 echo "$*"
16 fi
17}
18
19# Notify the user of a noteworthy condition.
20# Output: logs
21bbnote() {
22 if [ -p ${LOGFIFO} ] ; then
23 printf "%b\0" "bbnote $*" > ${LOGFIFO}
24 else
25 echo "NOTE: $*"
26 fi
27}
28
29# Print a warning to the log. Warnings are non-fatal, and do not
30# indicate a build failure.
31# Output: logs console
32bbwarn() {
33 if [ -p ${LOGFIFO} ] ; then
34 printf "%b\0" "bbwarn $*" > ${LOGFIFO}
35 else
36 echo "WARNING: $*"
37 fi
38}
39
40# Print an error to the log. Errors are non-fatal in that the build can
41# continue, but they do indicate a build failure.
42# Output: logs console
43bberror() {
44 if [ -p ${LOGFIFO} ] ; then
45 printf "%b\0" "bberror $*" > ${LOGFIFO}
46 else
47 echo "ERROR: $*"
48 fi
49}
50
51# Print a fatal error to the log. Fatal errors indicate build failure
52# and halt the build, exiting with an error code.
53# Output: logs console
54bbfatal() {
55 if [ -p ${LOGFIFO} ] ; then
56 printf "%b\0" "bbfatal $*" > ${LOGFIFO}
57 else
58 echo "ERROR: $*"
59 fi
60 exit 1
61}
62
63# Like bbfatal, except prevents the suppression of the error log by
64# bitbake's UI.
65# Output: logs console
66bbfatal_log() {
67 if [ -p ${LOGFIFO} ] ; then
68 printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
69 else
70 echo "ERROR: $*"
71 fi
72 exit 1
73}
74
75# Print debug messages. These are appropriate for progress checkpoint
76# messages to the logs. Depending on the debug log level, they may also
77# go to the console.
78# Output: logs console
79# Usage: bbdebug 1 "first level debug message"
80# bbdebug 2 "second level debug message"
81bbdebug() {
82 USAGE='Usage: bbdebug [123] "message"'
83 if [ $# -lt 2 ]; then
84 bbfatal "$USAGE"
85 fi
86
87 # Strip off the debug level and ensure it is an integer
88 DBGLVL=$1; shift
89 NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
90 if [ "$NONDIGITS" ]; then
91 bbfatal "$USAGE"
92 fi
93
94 # All debug output is printed to the logs
95 if [ -p ${LOGFIFO} ] ; then
96 printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
97 else
98 echo "DEBUG: $*"
99 fi
100}
101
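The shell helpers above all follow one pattern: if ${LOGFIFO} exists as a named pipe they write a NUL-terminated record such as "bbnote <msg>" for BitBake to pick up, otherwise they fall back to a plain echo with the matching prefix. A standalone Python sketch of the same write path; the default FIFO path is a placeholder:

    import os
    import stat

    def bbnote(message, logfifo="/tmp/fifo.12345"):
        """Send a NOTE either to the BitBake log FIFO or to stdout."""
        try:
            is_fifo = stat.S_ISFIFO(os.stat(logfifo).st_mode)
        except OSError:
            is_fifo = False

        if is_fifo:
            # Same record format as the shell helper: "bbnote <msg>" plus a NUL terminator
            with open(logfifo, "wb") as fifo:
                fifo.write(b"bbnote " + message.encode() + b"\0")
        else:
            print("NOTE: %s" % message)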
diff --git a/meta/classes/manpages.bbclass b/meta/classes/manpages.bbclass
deleted file mode 100644
index 1e66780646..0000000000
--- a/meta/classes/manpages.bbclass
+++ /dev/null
@@ -1,44 +0,0 @@
1# Inherit this class to enable or disable building and installation of manpages
2# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
3# tends to pull in the entire XML stack and other tools, so it's not enabled
4# by default.
5PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
6
7inherit qemu
8
9# manual files are usually packaged into ${PN}-doc, except for man-pages
10MAN_PKG ?= "${PN}-doc"
11
12# only add man-db to RDEPENDS when manual files are built and installed
13RDEPENDS_${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
14
15pkg_postinst_append_${MAN_PKG} () {
16 # only update manual page index caches when manual files are built and installed
17 if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
18 if test -n "$D"; then
19 if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
20 sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
21 chown -R root:root $D${mandir}
22 mkdir -p $D${localstatedir}/cache/man
23 cd $D${mandir}
24 find . -name index.db | while read index; do
25 mkdir -p $D${localstatedir}/cache/man/$(dirname ${index})
26 mv ${index} $D${localstatedir}/cache/man/${index}
27 chown man:man $D${localstatedir}/cache/man/${index}
28 done
29 cd -
30 else
31 $INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
32 fi
33 else
34 mandb -q
35 fi
36 fi
37}
38
39pkg_postrm_append_${MAN_PKG} () {
40 # only update manual page index caches when manual files are built and installed
41 if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
42 mandb -q
43 fi
44}
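When the postinst above runs at rootfs creation time under qemu-usermode, it generates the man-db indexes inside the image and then moves every index.db found under ${mandir} into ${localstatedir}/cache/man, preserving the relative section path (the chown to man:man is left out here). A standalone sketch of just that relocation step; the directory arguments stand in for $D${mandir} and $D${localstatedir}/cache/man:

    import os
    import shutil

    def relocate_mandb_indexes(mandir, cachedir):
        """Move every index.db found under mandir into cachedir, keeping relative paths."""
        for root, _dirs, files in os.walk(mandir):
            for name in files:
                if name != "index.db":
                    continue
                rel = os.path.relpath(os.path.join(root, name), mandir)
                dest = os.path.join(cachedir, rel)
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                shutil.move(os.path.join(root, name), dest)

    # e.g. relocate_mandb_indexes("rootfs/usr/share/man", "rootfs/var/cache/man")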
diff --git a/meta/classes/mcextend.bbclass b/meta/classes/mcextend.bbclass
index 0f8f962298..a489eeb3c7 100644
--- a/meta/classes/mcextend.bbclass
+++ b/meta/classes/mcextend.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1 7python mcextend_virtclass_handler () {
2 8 cls = e.data.getVar("BBEXTENDCURR")
3 9 variant = e.data.getVar("BBEXTENDVARIANT")
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
deleted file mode 100644
index a7644e70cb..0000000000
--- a/meta/classes/meson.bbclass
+++ /dev/null
@@ -1,189 +0,0 @@
1inherit siteinfo python3native
2
3DEPENDS_append = " meson-native ninja-native"
4
5# As Meson enforces out-of-tree builds we can just use cleandirs
6B = "${WORKDIR}/build"
7do_configure[cleandirs] = "${B}"
8
9# Where the meson.build build configuration is
10MESON_SOURCEPATH = "${S}"
11
12def noprefix(var, d):
13 return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
14
15MESON_BUILDTYPE ?= "plain"
16MESONOPTS = " --prefix ${prefix} \
17 --buildtype ${MESON_BUILDTYPE} \
18 --bindir ${@noprefix('bindir', d)} \
19 --sbindir ${@noprefix('sbindir', d)} \
20 --datadir ${@noprefix('datadir', d)} \
21 --libdir ${@noprefix('libdir', d)} \
22 --libexecdir ${@noprefix('libexecdir', d)} \
23 --includedir ${@noprefix('includedir', d)} \
24 --mandir ${@noprefix('mandir', d)} \
25 --infodir ${@noprefix('infodir', d)} \
26 --sysconfdir ${sysconfdir} \
27 --localstatedir ${localstatedir} \
28 --sharedstatedir ${sharedstatedir} \
29 --wrap-mode nodownload"
30
31EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
32
33MESON_CROSS_FILE = ""
34MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
35MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
36
37def meson_array(var, d):
38 items = d.getVar(var).split()
39 return repr(items[0] if len(items) == 1 else items)
40
41# Map our ARCH values to what Meson expects:
42# http://mesonbuild.com/Reference-tables.html#cpu-families
43def meson_cpu_family(var, d):
44 import re
45 arch = d.getVar(var)
46 if arch == 'powerpc':
47 return 'ppc'
48 elif arch == 'powerpc64' or arch == 'powerpc64le':
49 return 'ppc64'
50 elif arch == 'armeb':
51 return 'arm'
52 elif arch == 'aarch64_be':
53 return 'aarch64'
54 elif arch == 'mipsel':
55 return 'mips'
56 elif arch == 'mips64el':
57 return 'mips64'
58 elif re.match(r"i[3-6]86", arch):
59 return "x86"
60 elif arch == "microblazeel":
61 return "microblaze"
62 else:
63 return arch
64
65# Map our OS values to what Meson expects:
66# https://mesonbuild.com/Reference-tables.html#operating-system-names
67def meson_operating_system(var, d):
68 os = d.getVar(var)
69 if "mingw" in os:
70 return "windows"
71 # avoid e.g 'linux-gnueabi'
72 elif "linux" in os:
73 return "linux"
74 else:
75 return os
76
77def meson_endian(prefix, d):
78 arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
79 sitedata = siteinfo_data_for_machine(arch, os, d)
80 if "endian-little" in sitedata:
81 return "little"
82 elif "endian-big" in sitedata:
83 return "big"
84 else:
85 bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
86
87addtask write_config before do_configure
88do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS"
89do_write_config() {
90 # This needs to be Py to split the args into single-element lists
91 cat >${WORKDIR}/meson.cross <<EOF
92[binaries]
93c = ${@meson_array('CC', d)}
94cpp = ${@meson_array('CXX', d)}
95ar = ${@meson_array('AR', d)}
96nm = ${@meson_array('NM', d)}
97strip = ${@meson_array('STRIP', d)}
98readelf = ${@meson_array('READELF', d)}
99pkgconfig = 'pkg-config'
100llvm-config = 'llvm-config${LLVMVERSION}'
101cups-config = 'cups-config'
102g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
103g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
104
105[properties]
106needs_exe_wrapper = true
107c_args = ${@meson_array('CFLAGS', d)}
108c_link_args = ${@meson_array('LDFLAGS', d)}
109cpp_args = ${@meson_array('CXXFLAGS', d)}
110cpp_link_args = ${@meson_array('LDFLAGS', d)}
111gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
112
113[host_machine]
114system = '${@meson_operating_system('HOST_OS', d)}'
115cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
116cpu = '${HOST_ARCH}'
117endian = '${@meson_endian('HOST', d)}'
118
119[target_machine]
120system = '${@meson_operating_system('TARGET_OS', d)}'
121cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
122cpu = '${TARGET_ARCH}'
123endian = '${@meson_endian('TARGET', d)}'
124EOF
125}
126
127CONFIGURE_FILES = "meson.build"
128
129meson_do_configure() {
130 # Meson requires this to be 'bfd, 'lld' or 'gold' from 0.53 onwards
131 # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
132 unset LD
133
134 # Work around "Meson fails if /tmp is mounted with noexec #2972"
135 mkdir -p "${B}/meson-private/tmp"
136 export TMPDIR="${B}/meson-private/tmp"
137 bbnote Executing meson ${EXTRA_OEMESON}...
138 if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
139 bbfatal_log meson failed
140 fi
141}
142
143override_native_tools() {
144 # Set these so that meson uses the native tools for its build sanity tests,
145 # which require executables to be runnable. The cross file will still
146 # override these for the target build.
147 export CC="${BUILD_CC}"
148 export CXX="${BUILD_CXX}"
149 export LD="${BUILD_LD}"
150 export AR="${BUILD_AR}"
151 export STRIP="${BUILD_STRIP}"
152 # These contain *target* flags but will be used as *native* flags. The
153 # correct native flags will be passed via -Dc_args and so on, unset them so
154 # they don't interfere with tools invoked by Meson (such as g-ir-scanner)
155 unset CPPFLAGS CFLAGS CXXFLAGS LDFLAGS
156}
157
158meson_do_configure_prepend_class-target() {
159 override_native_tools
160}
161
162meson_do_configure_prepend_class-nativesdk() {
163 override_native_tools
164}
165
166meson_do_configure_prepend_class-native() {
167 export PKG_CONFIG="pkg-config-native"
168}
169
170python meson_do_qa_configure() {
171 import re
172 warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
173 with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile:
174 log = logfile.read()
175 for (prop, value) in warn_re.findall(log):
176 bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value))
177}
178do_configure[postfuncs] += "meson_do_qa_configure"
179
180do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
181meson_do_compile() {
182 ninja -v ${PARALLEL_MAKE}
183}
184
185meson_do_install() {
186 DESTDIR='${D}' ninja -v ${PARALLEL_MAKEINST} install
187}
188
189EXPORT_FUNCTIONS do_configure do_compile do_install
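
For context on how this class was consumed, a minimal recipe sketch (recipe name, URI and option are illustrative, not taken from this change) would have looked roughly like the following; the ${WORKDIR}/meson.cross file written by do_write_config above is what makes the target build cross-compile correctly.

# hypothetical recipe fragment using meson.bbclass
SUMMARY = "Example Meson-built component"
SRC_URI = "git://example.org/example.git;branch=main"

inherit meson pkgconfig

# extra arguments are passed to meson via EXTRA_OEMESON / MESONOPTS
EXTRA_OEMESON = "-Dtests=false"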
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
deleted file mode 100644
index 5e6890238b..0000000000
--- a/meta/classes/meta.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
1
2PACKAGES = ""
3
4do_build[recrdeptask] = "do_build"
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index 2608a7ef7b..6842119b6b 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -1,44 +1,10 @@
1METADATA_BRANCH ?= "${@base_detect_branch(d)}"
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7METADATA_BRANCH := "${@oe.buildcfg.detect_branch(d)}"
8METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
3METADATA_REVISION ?= "${@base_detect_revision(d)}"
9METADATA_REVISION := "${@oe.buildcfg.detect_revision(d)}"
10METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
5
6def base_detect_revision(d):
7 path = base_get_scmbasepath(d)
8 return base_get_metadata_git_revision(path, d)
9
10def base_detect_branch(d):
11 path = base_get_scmbasepath(d)
12 return base_get_metadata_git_branch(path, d)
13
14def base_get_scmbasepath(d):
15 return os.path.join(d.getVar('COREBASE'), 'meta')
16
17def base_get_metadata_svn_revision(path, d):
18 # This only works with older subversion. For newer versions
19 # this function will need to be fixed by someone interested
20 revision = "<unknown>"
21 try:
22 with open("%s/.svn/entries" % path) as f:
23 revision = f.readlines()[3].strip()
24 except (IOError, IndexError):
25 pass
26 return revision
27
28def base_get_metadata_git_branch(path, d):
29 import bb.process
30
31 try:
32 rev, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
33 except bb.process.ExecutionError:
34 rev = '<unknown>'
35 return rev.strip()
36
37def base_get_metadata_git_revision(path, d):
38 import bb.process
39
40 try:
41 rev, _ = bb.process.run('git rev-parse HEAD', cwd=path)
42 except bb.process.ExecutionError:
43 rev = '<unknown>'
44 return rev.strip()
diff --git a/meta/classes/migrate_localcount.bbclass b/meta/classes/migrate_localcount.bbclass
index 810a541316..1d00c110e2 100644
--- a/meta/classes/migrate_localcount.bbclass
+++ b/meta/classes/migrate_localcount.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
8LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc"
9
diff --git a/meta/classes/mime-xdg.bbclass b/meta/classes/mime-xdg.bbclass
deleted file mode 100644
index 642a5b7595..0000000000
--- a/meta/classes/mime-xdg.bbclass
+++ /dev/null
@@ -1,74 +0,0 @@
1#
2# This class creates mime <-> application associations based on entry
3# 'MimeType' in *.desktop files
4#
5
6DEPENDS += "desktop-file-utils"
7PACKAGE_WRITE_DEPS += "desktop-file-utils-native"
8DESKTOPDIR = "${datadir}/applications"
9
10# There are recipes out there installing their .desktop files as absolute
11# symlinks. For us these are dangling and cannot be introspected for "MimeType"
12# easily. By adding package names to MIME_XDG_PACKAGES, the packager can force
13# proper update-desktop-database handling. Note that all introspection is
14# skipped when MIME_XDG_PACKAGES is not empty
15MIME_XDG_PACKAGES ?= ""
16
17mime_xdg_postinst() {
18if [ "x$D" != "x" ]; then
19 $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
20 mlprefix=${MLPREFIX} \
21 desktop_dir=${DESKTOPDIR}
22else
23 update-desktop-database $D${DESKTOPDIR}
24fi
25}
26
27mime_xdg_postrm() {
28if [ "x$D" != "x" ]; then
29 $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
30 mlprefix=${MLPREFIX} \
31 desktop_dir=${DESKTOPDIR}
32else
33 update-desktop-database $D${DESKTOPDIR}
34fi
35}
36
37python populate_packages_append () {
38 packages = d.getVar('PACKAGES').split()
39 pkgdest = d.getVar('PKGDEST')
40 desktop_base = d.getVar('DESKTOPDIR')
41 forced_mime_xdg_pkgs = (d.getVar('MIME_XDG_PACKAGES') or '').split()
42
43 for pkg in packages:
44 desktops_with_mime_found = pkg in forced_mime_xdg_pkgs
45 if d.getVar('MIME_XDG_PACKAGES') == '':
46 desktop_dir = '%s/%s%s' % (pkgdest, pkg, desktop_base)
47 if os.path.exists(desktop_dir):
48 for df in os.listdir(desktop_dir):
49 if df.endswith('.desktop'):
50 try:
51 with open(desktop_dir + '/'+ df, 'r') as f:
52 for line in f.read().split('\n'):
53 if 'MimeType' in line:
54 desktops_with_mime_found = True
55 break;
56 except:
57 bb.warn('Could not open %s. Set MIME_XDG_PACKAGES in recipe or add mime-xdg to INSANE_SKIP.' % desktop_dir + '/'+ df)
58 if desktops_with_mime_found:
59 break
60 if desktops_with_mime_found:
61 bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
62 postinst = d.getVar('pkg_postinst_%s' % pkg)
63 if not postinst:
64 postinst = '#!/bin/sh\n'
65 postinst += d.getVar('mime_xdg_postinst')
66 d.setVar('pkg_postinst_%s' % pkg, postinst)
67 postrm = d.getVar('pkg_postrm_%s' % pkg)
68 if not postrm:
69 postrm = '#!/bin/sh\n'
70 postrm += d.getVar('mime_xdg_postrm')
71 d.setVar('pkg_postrm_%s' % pkg, postrm)
72 bb.note("adding desktop-file-utils dependency to %s" % pkg)
73 d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
74}
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
deleted file mode 100644
index bb99bc35cb..0000000000
--- a/meta/classes/mime.bbclass
+++ /dev/null
@@ -1,70 +0,0 @@
1#
2# This class is used by recipes installing mime types
3#
4
5DEPENDS += "${@bb.utils.contains('BPN', 'shared-mime-info', '', 'shared-mime-info', d)}"
6PACKAGE_WRITE_DEPS += "shared-mime-info-native"
7MIMEDIR = "${datadir}/mime"
8
9mime_postinst() {
10if [ "x$D" != "x" ]; then
11 $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
12 mlprefix=${MLPREFIX} \
13 mimedir=${MIMEDIR}
14else
15 echo "Updating MIME database... this may take a while."
16 update-mime-database $D${MIMEDIR}
17fi
18}
19
20mime_postrm() {
21if [ "x$D" != "x" ]; then
22 $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
23 mlprefix=${MLPREFIX} \
24 mimedir=${MIMEDIR}
25else
26 echo "Updating MIME database... this may take a while."
27 # $D${MIMEDIR}/packages belong to package shared-mime-info-data,
28 # packages like libfm-mime depend on shared-mime-info-data.
29 # after shared-mime-info-data uninstalled, $D${MIMEDIR}/packages
30 # is removed, but update-mime-database needs this dir to update
31 # database, workaround to create one and remove it later
32 if [ ! -d $D${MIMEDIR}/packages ]; then
33 mkdir -p $D${MIMEDIR}/packages
34 update-mime-database $D${MIMEDIR}
35 rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages
36 else
37 update-mime-database $D${MIMEDIR}
38fi
39fi
40}
41
42python populate_packages_append () {
43 packages = d.getVar('PACKAGES').split()
44 pkgdest = d.getVar('PKGDEST')
45 mimedir = d.getVar('MIMEDIR')
46
47 for pkg in packages:
48 mime_packages_dir = '%s/%s%s/packages' % (pkgdest, pkg, mimedir)
49 mimes_types_found = False
50 if os.path.exists(mime_packages_dir):
51 for f in os.listdir(mime_packages_dir):
52 if f.endswith('.xml'):
53 mimes_types_found = True
54 break
55 if mimes_types_found:
56 bb.note("adding mime postinst and postrm scripts to %s" % pkg)
57 postinst = d.getVar('pkg_postinst_%s' % pkg)
58 if not postinst:
59 postinst = '#!/bin/sh\n'
60 postinst += d.getVar('mime_postinst')
61 d.setVar('pkg_postinst_%s' % pkg, postinst)
62 postrm = d.getVar('pkg_postrm_%s' % pkg)
63 if not postrm:
64 postrm = '#!/bin/sh\n'
65 postrm += d.getVar('mime_postrm')
66 d.setVar('pkg_postrm_%s' % pkg, postrm)
67 if pkg != 'shared-mime-info-data':
68 bb.note("adding shared-mime-info-data dependency to %s" % pkg)
69 d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
70}
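
As a usage sketch (file names are illustrative), a recipe picks this class up simply by installing its XML under ${datadir}/mime/packages and inheriting it; the populate_packages_append above then injects the postinst/postrm:

# hypothetical recipe fragment using mime.bbclass
inherit mime

do_install_append() {
    install -d ${D}${datadir}/mime/packages
    install -m 0644 ${WORKDIR}/example-mime.xml ${D}${datadir}/mime/packages/
}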
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
deleted file mode 100644
index 87bba41472..0000000000
--- a/meta/classes/mirrors.bbclass
+++ /dev/null
@@ -1,76 +0,0 @@
1MIRRORS += "\
2${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \n \
3${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
4${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
5${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
6${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \n \
7${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \n \
8${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \n \
9${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \n \
10${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \n \
11${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \n \
12${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \n \
13${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \n \
14${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \n \
15${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \n \
16${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \n \
17${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \n \
18${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \n \
19${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \n \
20${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \n \
21${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \n \
22${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \n \
23${GNU_MIRROR} https://mirrors.kernel.org/gnu \n \
24${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
25${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \
26${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \
27${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
28ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
29ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
30ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
31ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
32http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
33http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
34http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
35${APACHE_MIRROR} http://www.us.apache.org/dist \n \
36${APACHE_MIRROR} http://archive.apache.org/dist \n \
37http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
38${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
39${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
40ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \n \
41ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \n \
42ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \n \
43cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
44svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
45git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
46hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
47bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
48p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
49osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
50https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
51ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
52npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
53cvs://.*/.* http://sources.openembedded.org/ \n \
54svn://.*/.* http://sources.openembedded.org/ \n \
55git://.*/.* http://sources.openembedded.org/ \n \
56hg://.*/.* http://sources.openembedded.org/ \n \
57bzr://.*/.* http://sources.openembedded.org/ \n \
58p4://.*/.* http://sources.openembedded.org/ \n \
59osc://.*/.* http://sources.openembedded.org/ \n \
60https?$://.*/.* http://sources.openembedded.org/ \n \
61ftp://.*/.* http://sources.openembedded.org/ \n \
62npm://.*/?.* http://sources.openembedded.org/ \n \
63${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
64${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
65"
66
67# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
68# where git native protocol fetches may fail due to local firewall rules, etc.
69
70MIRRORS += "\
71git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \n \
72git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \n \
73git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
74git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
75git://.*/.* git://HOST/PATH;protocol=https \n \
76"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
deleted file mode 100644
index 27bd69ff33..0000000000
--- a/meta/classes/module-base.bbclass
+++ /dev/null
@@ -1,21 +0,0 @@
1inherit kernel-arch
2
3# We do the dependency this way because the output is not preserved
4# in sstate, so we must force do_compile to run (once).
5do_configure[depends] += "make-mod-scripts:do_compile"
6
7export OS = "${TARGET_OS}"
8export CROSS_COMPILE = "${TARGET_PREFIX}"
9
10# This points to the build artefacts from the main kernel build
11# such as .config and System.map
12# Confusingly it is not the module build output (which is ${B}) but
13# we didn't pick the name.
14export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
15
16export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
17KERNEL_OBJECT_SUFFIX = ".ko"
18
19# kernel modules are generally machine specific
20PACKAGE_ARCH = "${MACHINE_ARCH}"
21
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
deleted file mode 100644
index c0dfa35061..0000000000
--- a/meta/classes/module.bbclass
+++ /dev/null
@@ -1,74 +0,0 @@
1inherit module-base kernel-module-split pkgconfig
2
3EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
4
5MODULES_INSTALL_TARGET ?= "modules_install"
6MODULES_MODULE_SYMVERS_LOCATION ?= ""
7
8python __anonymous () {
9 depends = d.getVar('DEPENDS')
10 extra_symbols = []
11 for dep in depends.split():
12 if dep.startswith("kernel-module-"):
13 extra_symbols.append("${STAGING_INCDIR}/" + dep + "/Module.symvers")
14 d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
15}
16
17python do_devshell_prepend () {
18 os.environ['CFLAGS'] = ''
19 os.environ['CPPFLAGS'] = ''
20 os.environ['CXXFLAGS'] = ''
21 os.environ['LDFLAGS'] = ''
22
23 os.environ['KERNEL_PATH'] = d.getVar('STAGING_KERNEL_DIR')
24 os.environ['KERNEL_SRC'] = d.getVar('STAGING_KERNEL_DIR')
25 os.environ['KERNEL_VERSION'] = d.getVar('KERNEL_VERSION')
26 os.environ['CC'] = d.getVar('KERNEL_CC')
27 os.environ['LD'] = d.getVar('KERNEL_LD')
28 os.environ['AR'] = d.getVar('KERNEL_AR')
29 os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR')
30 kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS')
31 if kbuild_extra_symbols:
32 os.environ['KBUILD_EXTRA_SYMBOLS'] = kbuild_extra_symbols
33 else:
34 os.environ['KBUILD_EXTRA_SYMBOLS'] = ''
35}
36
37module_do_compile() {
38 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
39 oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
40 KERNEL_VERSION=${KERNEL_VERSION} \
41 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
42 AR="${KERNEL_AR}" \
43 O=${STAGING_KERNEL_BUILDDIR} \
44 KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \
45 ${MAKE_TARGETS}
46}
47
48module_do_install() {
49 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
50 oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
51 INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \
52 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
53 O=${STAGING_KERNEL_BUILDDIR} \
54 ${MODULES_INSTALL_TARGET}
55
56 if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then
57 bbwarn "Module.symvers not found in ${B}/${MODULES_MODULE_SYMVERS_LOCATION}"
58 bbwarn "Please consider setting MODULES_MODULE_SYMVERS_LOCATION to a"
59 bbwarn "directory below B to get correct inter-module dependencies"
60 else
61 install -Dm0644 "${B}/${MODULES_MODULE_SYMVERS_LOCATION}"/Module.symvers ${D}${includedir}/${BPN}/Module.symvers
62 # Module.symvers contains absolute path to the build directory.
63 # While it doesn't actually seem to matter which path is specified,
64 # clear them out to avoid confusion
65 sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
66 fi
67}
68
69EXPORT_FUNCTIONS do_compile do_install
70
71# add all split modules to PN RDEPENDS; PN can be empty now
72KERNEL_MODULES_META_PACKAGE = "${PN}"
73FILES_${PN} = ""
74ALLOW_EMPTY_${PN} = "1"
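
A typical consumer of this class is an out-of-tree kernel module recipe along these lines (a sketch; file names are illustrative):

# hypothetical out-of-tree kernel module recipe using module.bbclass
SUMMARY = "Example out-of-tree kernel module"
SRC_URI = "file://Makefile \
           file://example.c \
          "
S = "${WORKDIR}"

inherit module

# module.bbclass runs oe_runmake with KERNEL_SRC, KERNEL_VERSION, CC/LD/AR
# and O set, so the module's Makefile only needs standard kbuild obj-m rules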
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 9f726e4537..b6c09969b1 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python multilib_virtclass_handler () {
8 cls = e.data.getVar("BBEXTENDCURR")
9 variant = e.data.getVar("BBEXTENDVARIANT")
@@ -24,6 +30,9 @@ python multilib_virtclass_handler () {
30 if val:
31 e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
32
33 # We nearly don't need this but dependencies on NON_MULTILIB_RECIPES don't work without it
34 d.setVar("SSTATE_ARCHS_TUNEPKG", "${@all_multilib_tune_values(d, 'TUNE_PKGARCH')}")
35
36 overrides = e.data.getVar("OVERRIDES", False)
37 pn = e.data.getVar("PN", False)
38 overrides = overrides.replace("pn-${PN}", "pn-${PN}:pn-" + pn)
@@ -35,7 +44,7 @@ python multilib_virtclass_handler () {
44 e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
45 override = ":virtclass-multilib-" + variant
46 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
38 target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
47 target_vendor = e.data.getVar("TARGET_VENDOR:" + "virtclass-multilib-" + variant, False)
48 if target_vendor:
49 e.data.setVar("TARGET_VENDOR", target_vendor)
50 return
@@ -45,6 +54,7 @@ python multilib_virtclass_handler () {
45 e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot") 54 e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
46 e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot") 55 e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
47 e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot") 56 e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
57 e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
48 e.data.setVar("MLPREFIX", variant + "-") 58 e.data.setVar("MLPREFIX", variant + "-")
49 override = ":virtclass-multilib-" + variant 59 override = ":virtclass-multilib-" + variant
50 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override) 60 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
@@ -65,24 +75,25 @@ python multilib_virtclass_handler () {
75
76 override = ":virtclass-multilib-" + variant
77
68 blacklist = e.data.getVarFlag('PNBLACKLIST', e.data.getVar('PN'))
78 skip_msg = e.data.getVarFlag('SKIP_RECIPE', e.data.getVar('PN'))
69 if blacklist:
79 if skip_msg:
80 pn_new = variant + "-" + e.data.getVar('PN')
71 if not e.data.getVarFlag('PNBLACKLIST', pn_new):
81 if not e.data.getVarFlag('SKIP_RECIPE', pn_new):
72 e.data.setVarFlag('PNBLACKLIST', pn_new, blacklist)
82 e.data.setVarFlag('SKIP_RECIPE', pn_new, skip_msg)
83
84 e.data.setVar("MLPREFIX", variant + "-")
85 e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
86 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
87
78 # Expand WHITELIST_GPL-3.0 with multilib prefix
88 # Expand INCOMPATIBLE_LICENSE_EXCEPTIONS with multilib prefix
79 pkgs = e.data.getVar("WHITELIST_GPL-3.0")
89 pkgs = e.data.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS")
80 for pkg in pkgs.split():
90 if pkgs:
81 pkgs += " " + variant + "-" + pkg
91 for pkg in pkgs.split():
82 e.data.setVar("WHITELIST_GPL-3.0", pkgs)
92 pkgs += " " + variant + "-" + pkg
93 e.data.setVar("INCOMPATIBLE_LICENSE_EXCEPTIONS", pkgs)
94
95 # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
85 newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
96 newtune = e.data.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + variant, False)
97 if newtune:
98 e.data.setVar("DEFAULTTUNE", newtune)
99}
@@ -92,6 +103,10 @@ multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
103
104python __anonymous () {
105 if bb.data.inherits_class('image', d):
106 # set rpm preferred file color for 32-bit multilib image
107 if d.getVar("SITEINFO_BITS") == "32":
108 d.setVar("RPM_PREFER_ELF_ARCH", "1")
109
110 variant = d.getVar("BBEXTENDVARIANT")
111 import oe.classextend
112
@@ -105,7 +120,6 @@ python __anonymous () {
105 d.setVar("LINGUAS_INSTALL", "") 120 d.setVar("LINGUAS_INSTALL", "")
106 # FIXME, we need to map this to something, not delete it! 121 # FIXME, we need to map this to something, not delete it!
107 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "") 122 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
108 bb.build.deltask('do_populate_sdk', d)
123 bb.build.deltask('do_populate_sdk_ext', d)
124 return
125}
@@ -126,6 +140,7 @@ python multilib_virtclass_handler_postkeyexp () {
140 return
141
142 clsextend.map_depends_variable("DEPENDS")
143 clsextend.map_depends_variable("PACKAGE_WRITE_DEPS")
144 clsextend.map_variable("PROVIDES")
145
146 if bb.data.inherits_class('cross-canadian', d):
@@ -177,7 +192,7 @@ def reset_alternative_priority(d):
192 bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
193 d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
194
180 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
195 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
196 # ALTERNATIVE_PRIORITY_pkg[tool] = priority
197 alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
198 # ALTERNATIVE_PRIORITY[tool] = priority
@@ -192,12 +207,12 @@ def reset_alternative_priority(d):
207 bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
208 d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
209
195PACKAGEFUNCS_append = " do_package_qa_multilib"
210PACKAGEFUNCS:append = " do_package_qa_multilib"
211
212python do_package_qa_multilib() {
213
214 def check_mlprefix(pkg, var, mlprefix):
200 values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg)) or d.getVar(var) or "")
215 values = bb.utils.explode_deps(d.getVar('%s:%s' % (var, pkg)) or d.getVar(var) or "")
216 candidates = []
217 for i in values:
218 if i.startswith('virtual/'):
@@ -211,7 +226,7 @@ python do_package_qa_multilib() {
226 if len(candidates) > 0:
227 msg = "%s package %s - suspicious values '%s' in %s" \
228 % (d.getVar('PN'), pkg, ' '.join(candidates), var)
214 package_qa_handle_error("multilib", msg, d)
229 oe.qa.handle_error("multilib", msg, d)
230
231 ml = d.getVar('MLPREFIX')
232 if not ml:
@@ -229,4 +244,5 @@ python do_package_qa_multilib() {
244 check_mlprefix(pkg, 'RSUGGESTS', ml)
245 check_mlprefix(pkg, 'RREPLACES', ml)
246 check_mlprefix(pkg, 'RCONFLICTS', ml)
247 oe.qa.exit_if_errors(d)
248}
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
index 98f65c8aae..6095d278dd 100644
--- a/meta/classes/multilib_global.bbclass
+++ b/meta/classes/multilib_global.bbclass
@@ -1,6 +1,13 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def preferred_ml_updates(d):
2 # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
8 # If any of PREFERRED_PROVIDER, PREFERRED_RPROVIDER, REQUIRED_VERSION
3 # we need to mirror these variables in the multilib case;
9 # or PREFERRED_VERSION are set, we need to mirror these variables in
10 # the multilib case;
11 multilibs = d.getVar('MULTILIBS') or ""
12 if not multilibs:
13 return
@@ -11,43 +18,54 @@ def preferred_ml_updates(d):
18 if len(eext) > 1 and eext[0] == 'multilib':
19 prefixes.append(eext[1])
20
14 versions = []
21 required_versions = []
22 preferred_versions = []
23 providers = []
24 rproviders = []
25 for v in d.keys():
26 if v.startswith("REQUIRED_VERSION_"):
27 required_versions.append(v)
28 if v.startswith("PREFERRED_VERSION_"):
19 versions.append(v)
29 preferred_versions.append(v)
30 if v.startswith("PREFERRED_PROVIDER_"):
31 providers.append(v)
32 if v.startswith("PREFERRED_RPROVIDER_"):
33 rproviders.append(v)
34
25 for v in versions:
35 def sort_versions(versions, keyword):
26 val = d.getVar(v, False)
36 version_str = "_".join([keyword, "VERSION", ""])
27 pkg = v.replace("PREFERRED_VERSION_", "")
37 for v in versions:
28 if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
38 val = d.getVar(v, False)
29 continue
39 pkg = v.replace(version_str, "")
30 if '-cross-' in pkg and '${' in pkg:
40 if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
41 continue
42 if '-cross-' in pkg and '${' in pkg:
43 for p in prefixes:
44 localdata = bb.data.createCopy(d)
45 override = ":virtclass-multilib-" + p
46 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
47 if "-canadian-" in pkg:
48 newtune = localdata.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + p, False)
49 if newtune:
50 localdata.setVar("DEFAULTTUNE", newtune)
51 newname = localdata.expand(v)
52 else:
53 newname = localdata.expand(v).replace(version_str, version_str + p + '-')
54 if newname != v:
55 newval = localdata.expand(val)
56 d.setVar(newname, newval)
57 # Avoid future variable key expansion
58 vexp = d.expand(v)
59 if v != vexp and d.getVar(v, False):
60 d.renameVar(v, vexp)
61 continue
62 for p in prefixes:
32 localdata = bb.data.createCopy(d)
63 newname = version_str + p + "-" + pkg
33 override = ":virtclass-multilib-" + p
64 if not d.getVar(newname, False):
34 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
65 d.setVar(newname, val)
35 if "-canadian-" in pkg:
66
36 newname = localdata.expand(v)
67 sort_versions(required_versions, "REQUIRED")
37 else:
68 sort_versions(preferred_versions, "PREFERRED")
38 newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
39 if newname != v:
40 newval = localdata.expand(val)
41 d.setVar(newname, newval)
42 # Avoid future variable key expansion
43 vexp = d.expand(v)
44 if v != vexp and d.getVar(v, False):
45 d.renameVar(v, vexp)
46 continue
47 for p in prefixes:
48 newname = "PREFERRED_VERSION_" + p + "-" + pkg
49 if not d.getVar(newname, False):
50 d.setVar(newname, val)
69
52 for prov in providers: 70 for prov in providers:
53 val = d.getVar(prov, False) 71 val = d.getVar(prov, False)
@@ -128,14 +146,14 @@ def preferred_ml_updates(d):
146 prov = prov.replace("virtual/", "")
147 return "virtual/" + prefix + "-" + prov
148
131 mp = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
149 mp = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
150 extramp = []
151 for p in mp:
152 if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
153 continue
154 for pref in prefixes:
155 extramp.append(translate_provide(pref, p))
138 d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
156 d.setVar("BB_MULTI_PROVIDER_ALLOWED", " ".join(mp + extramp))
157
158 abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
159 extras = []
@@ -155,8 +173,8 @@ def preferred_ml_updates(d):
173python multilib_virtclass_handler_vendor () {
174 if isinstance(e, bb.event.ConfigParsed):
175 for v in e.data.getVar("MULTILIB_VARIANTS").split():
158 if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
176 if e.data.getVar("TARGET_VENDOR:virtclass-multilib-" + v, False) is None:
159 e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
177 e.data.setVar("TARGET_VENDOR:virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
178 preferred_ml_updates(e.data)
179}
180addhandler multilib_virtclass_handler_vendor
@@ -177,6 +195,7 @@ python multilib_virtclass_handler_global () {
195 # from a copy of the datastore
196 localdata = bb.data.createCopy(d)
197 localdata.delVar("KERNEL_VERSION")
198 localdata.delVar("KERNEL_VERSION_PKG_NAME")
199
200 variants = (e.data.getVar("MULTILIB_VARIANTS") or "").split()
201
@@ -198,13 +217,13 @@ python multilib_virtclass_handler_global () {
217 if rprovs.strip():
218 e.data.setVar("RPROVIDES", rprovs)
219
201 # Process RPROVIDES_${PN}...
220 # Process RPROVIDES:${PN}...
221 for pkg in (e.data.getVar("PACKAGES") or "").split():
203 origrprovs = rprovs = localdata.getVar("RPROVIDES_%s" % pkg) or ""
222 origrprovs = rprovs = localdata.getVar("RPROVIDES:%s" % pkg) or ""
223 for clsextend in clsextends:
205 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
224 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES:%s" % pkg, setvar=False)
225 rprovs = rprovs + " " + clsextend.extname + "-" + pkg
207 e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
226 e.data.setVar("RPROVIDES:%s" % pkg, rprovs)
227}
228
229addhandler multilib_virtclass_handler_global
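
Both multilib classes above are driven from the distro or local configuration; a commonly documented sketch enabling a 32-bit lib32 multilib on a 64-bit x86 machine looks like the following (note this change also migrates such overrides from the '_' to the ':' separator):

# illustrative local.conf / distro fragment enabling a lib32 multilib
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
DEFAULTTUNE_virtclass-multilib-lib32 = "x86"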
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
deleted file mode 100644
index e03f5b13b2..0000000000
--- a/meta/classes/multilib_header.bbclass
+++ /dev/null
@@ -1,52 +0,0 @@
1inherit siteinfo
2
3# If applicable on the architecture, this routine will rename the header and
4# add a unique identifier to the name for the ABI/bitsize that is being used.
5# A wrapper will be generated for the architecture that knows how to call
6# all of the ABI variants for that given architecture.
7#
8oe_multilib_header() {
9
10 case ${HOST_OS} in
11 *-musl*)
12 return
13 ;;
14 *)
15 esac
16 # For MIPS: "n32" is a special case, which needs to be
17 # distinct from both 64-bit and 32-bit.
18 case ${TARGET_ARCH} in
19 mips*) case "${MIPSPKGSFX_ABI}" in
20 "-n32")
21 ident=n32
22 ;;
23 *)
24 ident=${SITEINFO_BITS}
25 ;;
26 esac
27 ;;
28 *) ident=${SITEINFO_BITS}
29 esac
30 for each_header in "$@" ; do
31 if [ ! -f "${D}/${includedir}/$each_header" ]; then
32 bberror "oe_multilib_header: Unable to find header $each_header."
33 continue
34 fi
35 stem=$(echo $each_header | sed 's#\.h$##')
36 # if mips64/n32 set ident to n32
37 mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
38
39 sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
40 done
41}
42
43# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
44# We don't need multilib headers for native builds so brute force things.
45oe_multilib_header_class-native () {
46 return
47}
48
49# Nor do we need multilib headers for nativesdk builds.
50oe_multilib_header_class-nativesdk () {
51 return
52}
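
Recipes whose installed headers differ between the 32-bit and 64-bit multilib variants call this helper from do_install; a hedged sketch (the header path is illustrative):

# hypothetical usage of oe_multilib_header from a recipe
do_install_append() {
    oe_multilib_header bits/wordsize.h
}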
diff --git a/meta/classes/multilib_script.bbclass b/meta/classes/multilib_script.bbclass
deleted file mode 100644
index b11efc1ec5..0000000000
--- a/meta/classes/multilib_script.bbclass
+++ /dev/null
@@ -1,34 +0,0 @@
1#
2# Recipe needs to set MULTILIB_SCRIPTS in the form <pkgname>:<scriptname>, e.g.
3# MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/file1 ${PN}:${base_bindir}/file2"
4# to indicate which script files to process from which packages.
5#
6
7inherit update-alternatives
8
9MULTILIB_SUFFIX = "${@d.getVar('base_libdir',1).split('/')[-1]}"
10
11PACKAGE_PREPROCESS_FUNCS += "multilibscript_rename"
12
13multilibscript_rename() {
14 :
15}
16
17python () {
18 # Do nothing if multilib isn't being used
19 if not d.getVar("MULTILIB_VARIANTS"):
20 return
21 # Do nothing for native/cross
22 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
23 return
24
25 for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
26 pkg, script = entry.split(":")
27 epkg = d.expand(pkg)
28 scriptname = os.path.basename(script)
29 d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
30 d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
31 d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
32 d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
33 d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
34}
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
deleted file mode 100644
index a0838e41b9..0000000000
--- a/meta/classes/native.bbclass
+++ /dev/null
@@ -1,193 +0,0 @@
1# We want native packages to be relocatable
2inherit relocatable
3
4# Native packages are built indirectly via dependency,
5# no need for them to be a direct target of 'world'
6EXCLUDE_FROM_WORLD = "1"
7
8PACKAGE_ARCH = "${BUILD_ARCH}"
9
10# used by cmake class
11OECMAKE_RPATH = "${libdir}"
12OECMAKE_RPATH_class-native = "${libdir}"
13
14TARGET_ARCH = "${BUILD_ARCH}"
15TARGET_OS = "${BUILD_OS}"
16TARGET_VENDOR = "${BUILD_VENDOR}"
17TARGET_PREFIX = "${BUILD_PREFIX}"
18TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
19TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
20TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
21TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}"
22TARGET_CFLAGS = "${BUILD_CFLAGS}"
23TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
24TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
25TARGET_FPU = ""
26
27HOST_ARCH = "${BUILD_ARCH}"
28HOST_OS = "${BUILD_OS}"
29HOST_VENDOR = "${BUILD_VENDOR}"
30HOST_PREFIX = "${BUILD_PREFIX}"
31HOST_CC_ARCH = "${BUILD_CC_ARCH}"
32HOST_LD_ARCH = "${BUILD_LD_ARCH}"
33HOST_AS_ARCH = "${BUILD_AS_ARCH}"
34
35CPPFLAGS = "${BUILD_CPPFLAGS}"
36CFLAGS = "${BUILD_CFLAGS}"
37CXXFLAGS = "${BUILD_CXXFLAGS}"
38LDFLAGS = "${BUILD_LDFLAGS}"
39
40STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
41STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
42
43# native pkg doesn't need the TOOLCHAIN_OPTIONS.
44TOOLCHAIN_OPTIONS = ""
45
46# Don't build ptest natively
47PTEST_ENABLED = "0"
48
49# Don't use site files for native builds
50export CONFIG_SITE = "${COREBASE}/meta/site/native"
51
52# set the compiler as well. It could have been set to something else
53export CC = "${BUILD_CC}"
54export CXX = "${BUILD_CXX}"
55export FC = "${BUILD_FC}"
56export CPP = "${BUILD_CPP}"
57export LD = "${BUILD_LD}"
58export CCLD = "${BUILD_CCLD}"
59export AR = "${BUILD_AR}"
60export AS = "${BUILD_AS}"
61export RANLIB = "${BUILD_RANLIB}"
62export STRIP = "${BUILD_STRIP}"
63export NM = "${BUILD_NM}"
64
65# Path prefixes
66base_prefix = "${STAGING_DIR_NATIVE}"
67prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
68exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
69
70bindir = "${STAGING_BINDIR_NATIVE}"
71sbindir = "${STAGING_SBINDIR_NATIVE}"
72base_libdir = "${STAGING_LIBDIR_NATIVE}"
73libdir = "${STAGING_LIBDIR_NATIVE}"
74includedir = "${STAGING_INCDIR_NATIVE}"
75sysconfdir = "${STAGING_ETCDIR_NATIVE}"
76datadir = "${STAGING_DATADIR_NATIVE}"
77
78baselib = "lib"
79
80export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
81
82NATIVE_PACKAGE_PATH_SUFFIX ?= ""
83bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
84sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
85base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
86libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
87libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
88
89do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
90do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
91
92# Since we actually install these into situ there is no staging prefix
93STAGING_DIR_HOST = ""
94STAGING_DIR_TARGET = ""
95PKG_CONFIG_DIR = "${libdir}/pkgconfig"
96
97EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
98PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
99PKG_CONFIG_SYSROOT_DIR = ""
100PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
101PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
102
103# we dont want libc-*libc to kick in for native recipes
104LIBCOVERRIDE = ""
105CLASSOVERRIDE = "class-native"
106MACHINEOVERRIDES = ""
107MACHINE_FEATURES = ""
108
109PATH_prepend = "${COREBASE}/scripts/native-intercept:"
110
111# This class encodes staging paths into its scripts data so can only be
112# reused if we manipulate the paths.
113SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
114
115# No strip sysroot when DEBUG_BUILD is enabled
116INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
117
118python native_virtclass_handler () {
119 pn = e.data.getVar("PN")
120 if not pn.endswith("-native"):
121 return
122
123 # Set features here to prevent appends and distro features backfill
124 # from modifying native distro features
125 features = set(d.getVar("DISTRO_FEATURES_NATIVE").split())
126 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVE"), d).split())
127 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
128
129 classextend = e.data.getVar('BBCLASSEXTEND') or ""
130 if "native" not in classextend:
131 return
132
133 def map_dependencies(varname, d, suffix = "", selfref=True):
134 if suffix:
135 varname = varname + "_" + suffix
136 deps = d.getVar(varname)
137 if not deps:
138 return
139 deps = bb.utils.explode_deps(deps)
140 newdeps = []
141 for dep in deps:
142 if dep == pn:
143 if not selfref:
144 continue
145 newdeps.append(dep)
146 elif "-cross-" in dep:
147 newdeps.append(dep.replace("-cross", "-native"))
148 elif not dep.endswith("-native"):
149 newdeps.append(dep.replace("-native", "") + "-native")
150 else:
151 newdeps.append(dep)
152 d.setVar(varname, " ".join(newdeps), parsing=True)
153
154 map_dependencies("DEPENDS", e.data, selfref=False)
155 for pkg in e.data.getVar("PACKAGES", False).split():
156 map_dependencies("RDEPENDS", e.data, pkg)
157 map_dependencies("RRECOMMENDS", e.data, pkg)
158 map_dependencies("RSUGGESTS", e.data, pkg)
159 map_dependencies("RPROVIDES", e.data, pkg)
160 map_dependencies("RREPLACES", e.data, pkg)
161 map_dependencies("PACKAGES", e.data)
162
163 provides = e.data.getVar("PROVIDES")
164 nprovides = []
165 for prov in provides.split():
166 if prov.find(pn) != -1:
167 nprovides.append(prov)
168 elif not prov.endswith("-native"):
169 nprovides.append(prov.replace(prov, prov + "-native"))
170 else:
171 nprovides.append(prov)
172 e.data.setVar("PROVIDES", ' '.join(nprovides))
173
174
175}
176
177addhandler native_virtclass_handler
178native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
179
180python do_addto_recipe_sysroot () {
181 bb.build.exec_func("extend_recipe_sysroot", d)
182}
183addtask addto_recipe_sysroot after do_populate_sysroot
184do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
185
186inherit nopackages
187
188do_packagedata[stamp-extra-info] = ""
189
190USE_NLS = "no"
191
192RECIPERDEPTASK = "do_populate_sysroot"
193do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
deleted file mode 100644
index 7f2692c51a..0000000000
--- a/meta/classes/nativesdk.bbclass
+++ /dev/null
@@ -1,115 +0,0 @@
1# SDK packages are built either explicitly by the user,
2# or indirectly via dependency. No need to be in 'world'.
3EXCLUDE_FROM_WORLD = "1"
4
5STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
6
7# libc for the SDK can be different to that of the target
8NATIVESDKLIBC ?= "libc-glibc"
9LIBCOVERRIDE = ":${NATIVESDKLIBC}"
10CLASSOVERRIDE = "class-nativesdk"
11MACHINEOVERRIDES = ""
12MACHINE_FEATURES = ""
13
14MULTILIBS = ""
15
16# we need consistent staging dir whether or not multilib is enabled
17STAGING_DIR_HOST = "${WORKDIR}/recipe-sysroot"
18STAGING_DIR_TARGET = "${WORKDIR}/recipe-sysroot"
19RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot"
20
21#
22# Update PACKAGE_ARCH and PACKAGE_ARCHS
23#
24PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
25PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
26
27#
28# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
29# binaries
30#
31DEPENDS_append = " chrpath-replacement-native"
32EXTRANATIVEPATH += "chrpath-native"
33
34PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
35
36HOST_ARCH = "${SDK_ARCH}"
37HOST_VENDOR = "${SDK_VENDOR}"
38HOST_OS = "${SDK_OS}"
39HOST_PREFIX = "${SDK_PREFIX}"
40HOST_CC_ARCH = "${SDK_CC_ARCH}"
41HOST_LD_ARCH = "${SDK_LD_ARCH}"
42HOST_AS_ARCH = "${SDK_AS_ARCH}"
43#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
44
45TARGET_ARCH = "${SDK_ARCH}"
46TARGET_VENDOR = "${SDK_VENDOR}"
47TARGET_OS = "${SDK_OS}"
48TARGET_PREFIX = "${SDK_PREFIX}"
49TARGET_CC_ARCH = "${SDK_CC_ARCH}"
50TARGET_LD_ARCH = "${SDK_LD_ARCH}"
51TARGET_AS_ARCH = "${SDK_AS_ARCH}"
52TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
53TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
54TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
55TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
56TARGET_FPU = ""
57EXTRA_OECONF_GCC_FLOAT = ""
58
59CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
60CFLAGS = "${BUILDSDK_CFLAGS}"
61CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
62LDFLAGS = "${BUILDSDK_LDFLAGS}"
63
64# Change to place files in SDKPATH
65base_prefix = "${SDKPATHNATIVE}"
66prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
67exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
68baselib = "lib"
69sbindir = "${bindir}"
70
71export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
72export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
73
74python nativesdk_virtclass_handler () {
75 pn = e.data.getVar("PN")
76 if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
77 return
78
79 # Set features here to prevent appends and distro features backfill
80 # from modifying nativesdk distro features
81 features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
82 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
83 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
84
85 e.data.setVar("MLPREFIX", "nativesdk-")
86 e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", ""))
87}
88
89python () {
90 pn = d.getVar("PN")
91 if not pn.startswith("nativesdk-"):
92 return
93
94 import oe.classextend
95
96 clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
97 clsextend.rename_packages()
98 clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
99
100 clsextend.map_depends_variable("DEPENDS")
101 clsextend.map_packagevars()
102 clsextend.map_variable("PROVIDES")
103 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
104 d.setVar("LIBCEXTENSION", "")
105 d.setVar("ABIEXTENSION", "")
106}
107
108addhandler nativesdk_virtclass_handler
109nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
110
111do_packagedata[stamp-extra-info] = ""
112
113USE_NLS = "${SDKUSE_NLS}"
114
115OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
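
The handler above only fires for package names carrying the nativesdk prefix or suffix, which most recipes produce indirectly via class extension rather than by inheriting this class; the usual sketch is:

# typical recipe opt-in that generates the nativesdk-<pn> variant handled above
BBCLASSEXTEND = "native nativesdk"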
diff --git a/meta/classes/nopackages.bbclass b/meta/classes/nopackages.bbclass
deleted file mode 100644
index 7a4f632d71..0000000000
--- a/meta/classes/nopackages.bbclass
+++ /dev/null
@@ -1,13 +0,0 @@
1deltask do_package
2deltask do_package_write_rpm
3deltask do_package_write_ipk
4deltask do_package_write_deb
5deltask do_package_write_tar
6deltask do_package_qa
7deltask do_packagedata
8deltask do_package_setscene
9deltask do_package_write_rpm_setscene
10deltask do_package_write_ipk_setscene
11deltask do_package_write_deb_setscene
12deltask do_package_qa_setscene
13deltask do_packagedata_setscene
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
deleted file mode 100644
index 55a6985fb0..0000000000
--- a/meta/classes/npm.bbclass
+++ /dev/null
@@ -1,318 +0,0 @@
1# Copyright (C) 2020 Savoir-Faire Linux
2#
3# SPDX-License-Identifier: GPL-2.0-only
4#
5# This bbclass builds and installs an npm package to the target. The package
6# sources files should be fetched in the calling recipe by using the SRC_URI
7# variable. The ${S} variable should be updated depending of your fetcher.
8#
9# Usage:
10# SRC_URI = "..."
11# inherit npm
12#
13# Optional variables:
14# NPM_ARCH:
15# Override the auto generated npm architecture.
16#
17# NPM_INSTALL_DEV:
18# Set to 1 to also install devDependencies.
19
20inherit python3native
21
22DEPENDS_prepend = "nodejs-native "
23RDEPENDS_${PN}_append_class-target = " nodejs"
24
25NPM_INSTALL_DEV ?= "0"
26
27def npm_target_arch_map(target_arch):
28 """Maps arch names to npm arch names"""
29 import re
30 if re.match("p(pc|owerpc)(|64)", target_arch):
31 return "ppc"
32 elif re.match("i.86$", target_arch):
33 return "ia32"
34 elif re.match("x86_64$", target_arch):
35 return "x64"
36 elif re.match("arm64$", target_arch):
37 return "arm"
38 return target_arch
39
40NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
41
42NPM_PACKAGE = "${WORKDIR}/npm-package"
43NPM_CACHE = "${WORKDIR}/npm-cache"
44NPM_BUILD = "${WORKDIR}/npm-build"
45
46def npm_global_configs(d):
47 """Get the npm global configuration"""
48 configs = []
49 # Ensure no network access is done
50 configs.append(("offline", "true"))
51 configs.append(("proxy", "http://invalid"))
52 # Configure the cache directory
53 configs.append(("cache", d.getVar("NPM_CACHE")))
54 return configs
55
56def npm_pack(env, srcdir, workdir):
57 """Run 'npm pack' on a specified directory"""
58 import shlex
59 cmd = "npm pack %s" % shlex.quote(srcdir)
60 configs = [("ignore-scripts", "true")]
61 tarball = env.run(cmd, configs=configs, workdir=workdir).strip("\n")
62 return os.path.join(workdir, tarball)
63
64python npm_do_configure() {
65 """
66 Step one: configure the npm cache and the main npm package
67
68 Every dependency has been fetched and patched in the source directory.
69 They have to be packed (this removes unneeded files) and added to the npm
70 cache to be available for the next step.
71
72 The main package and its associated manifest file and shrinkwrap file have
73 to be configured to take into account these cached dependencies.
74 """
75 import base64
76 import copy
77 import json
78 import re
79 import shlex
80 import tempfile
81 from bb.fetch2.npm import NpmEnvironment
82 from bb.fetch2.npm import npm_unpack
83 from bb.fetch2.npmsw import foreach_dependencies
84 from bb.progress import OutOfProgressHandler
85
86 bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
87 bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
88
89 env = NpmEnvironment(d, configs=npm_global_configs(d))
90
91 def _npm_cache_add(tarball):
92 """Run 'npm cache add' for a specified tarball"""
93 cmd = "npm cache add %s" % shlex.quote(tarball)
94 env.run(cmd)
95
96 def _npm_integrity(tarball):
97 """Return the npm integrity of a specified tarball"""
98 sha512 = bb.utils.sha512_file(tarball)
99 return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
100
101 def _npm_version(tarball):
102 """Return the version of a specified tarball"""
103 regex = r"-(\d+\.\d+\.\d+(-.*)?(\+.*)?)\.tgz"
104 return re.search(regex, tarball).group(1)
105
106 def _npmsw_dependency_dict(orig, deptree):
107 """
108 Return the sub dictionary in the 'orig' dictionary corresponding to the
109 'deptree' dependency tree. This function follows the shrinkwrap file
110 format.
111 """
112 ptr = orig
113 for dep in deptree:
114 if "dependencies" not in ptr:
115 ptr["dependencies"] = {}
116 ptr = ptr["dependencies"]
117 if dep not in ptr:
118 ptr[dep] = {}
119 ptr = ptr[dep]
120 return ptr
121
122 # Manage the manifest file and shrinkwrap files
123 orig_manifest_file = d.expand("${S}/package.json")
124 orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json")
125 cached_manifest_file = d.expand("${NPM_PACKAGE}/package.json")
126 cached_shrinkwrap_file = d.expand("${NPM_PACKAGE}/npm-shrinkwrap.json")
127
128 with open(orig_manifest_file, "r") as f:
129 orig_manifest = json.load(f)
130
131 cached_manifest = copy.deepcopy(orig_manifest)
132 cached_manifest.pop("dependencies", None)
133 cached_manifest.pop("devDependencies", None)
134
135 has_shrinkwrap_file = True
136
137 try:
138 with open(orig_shrinkwrap_file, "r") as f:
139 orig_shrinkwrap = json.load(f)
140 except IOError:
141 has_shrinkwrap_file = False
142
143 if has_shrinkwrap_file:
144 cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
145 cached_shrinkwrap.pop("dependencies", None)
146
147 # Manage the dependencies
148 progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
149 progress_total = 1 # also count the main package
150 progress_done = 0
151
152 def _count_dependency(name, params, deptree):
153 nonlocal progress_total
154 progress_total += 1
155
156 def _cache_dependency(name, params, deptree):
157 destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
158 destsuffix = os.path.join(*destsubdirs)
159 with tempfile.TemporaryDirectory() as tmpdir:
160 # Add the dependency to the npm cache
161 destdir = os.path.join(d.getVar("S"), destsuffix)
162 tarball = npm_pack(env, destdir, tmpdir)
163 _npm_cache_add(tarball)
164 # Add its signature to the cached shrinkwrap
165 dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
166 dep["version"] = _npm_version(tarball)
167 dep["integrity"] = _npm_integrity(tarball)
168 if params.get("dev", False):
169 dep["dev"] = True
170 # Display progress
171 nonlocal progress_done
172 progress_done += 1
173 progress.write("%d/%d" % (progress_done, progress_total))
174
175 dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
176
177 if has_shrinkwrap_file:
178 foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
179 foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
180
181 # Configure the main package
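# Packing the source and unpacking the result into NPM_PACKAGE filters the
# main package down to the files 'npm pack' would actually publish.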
182 with tempfile.TemporaryDirectory() as tmpdir:
183 tarball = npm_pack(env, d.getVar("S"), tmpdir)
184 npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
185
186 # Configure the cached manifest file and cached shrinkwrap file
187 def _update_manifest(depkey):
188 for name in orig_manifest.get(depkey, {}):
189 version = cached_shrinkwrap["dependencies"][name]["version"]
190 if depkey not in cached_manifest:
191 cached_manifest[depkey] = {}
192 cached_manifest[depkey][name] = version
193
194 if has_shrinkwrap_file:
195 _update_manifest("dependencies")
196
197 if dev:
198 if has_shrinkwrap_file:
199 _update_manifest("devDependencies")
200
201 with open(cached_manifest_file, "w") as f:
202 json.dump(cached_manifest, f, indent=2)
203
204 if has_shrinkwrap_file:
205 with open(cached_shrinkwrap_file, "w") as f:
206 json.dump(cached_shrinkwrap, f, indent=2)
207}
208
209python npm_do_compile() {
210 """
211 Step two: install the npm package
212
213 Use the configured main package and the cached dependencies to run the
214 installation process. The installation is done in a build directory which
215 is not yet the final destination directory.
216
217 A combination of 'npm pack' and 'npm install' is used to ensure that the
218 installed files are actual copies instead of symbolic links (which is the
219 default npm behavior).
220 """
221 import shlex
222 import tempfile
223 from bb.fetch2.npm import NpmEnvironment
224
225 bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
226
227 env = NpmEnvironment(d, configs=npm_global_configs(d))
228
229 dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
230
231 with tempfile.TemporaryDirectory() as tmpdir:
232 args = []
233 configs = []
234
235 if dev:
236 configs.append(("also", "development"))
237 else:
238 configs.append(("only", "production"))
239
240 # Report as many logs as possible for debugging purposes
241 configs.append(("loglevel", "silly"))
242
243 # Configure the installation to be done globally in the build directory
244 configs.append(("global", "true"))
245 configs.append(("prefix", d.getVar("NPM_BUILD")))
246
247 # Add node-gyp configuration
248 configs.append(("arch", d.getVar("NPM_ARCH")))
249 configs.append(("release", "true"))
250 sysroot = d.getVar("RECIPE_SYSROOT_NATIVE")
251 nodedir = os.path.join(sysroot, d.getVar("prefix_native").strip("/"))
252 configs.append(("nodedir", nodedir))
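# 'nodedir' tells node-gyp where to find the Node headers, here the ones
# staged in the native sysroot.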
253 configs.append(("python", d.getVar("PYTHON")))
254
255 # Add node-pre-gyp configuration
256 args.append(("target_arch", d.getVar("NPM_ARCH")))
257 args.append(("build-from-source", "true"))
258
259 # Pack and install the main package
260 tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
261 env.run("npm install %s" % shlex.quote(tarball), args=args, configs=configs)
262}
263
264npm_do_install() {
265 # Step three: final install
266 #
267 # The previous installation has to be filtered to remove some extra files.
268
269 rm -rf ${D}
270
271 # Copy the entire lib and bin directories
272 install -d ${D}/${nonarch_libdir}
273 cp --no-preserve=ownership --recursive ${NPM_BUILD}/lib/. ${D}/${nonarch_libdir}
274
275 if [ -d "${NPM_BUILD}/bin" ]
276 then
277 install -d ${D}/${bindir}
278 cp --no-preserve=ownership --recursive ${NPM_BUILD}/bin/. ${D}/${bindir}
279 fi
280
281 # If the package (or its dependencies) uses node-gyp to build native addons,
282 # object files, static libraries or other temporary files can be hidden in
283 # the lib directory. To reduce the package size and to avoid QA issues
284 # (staticdev with static library files) these files must be removed.
285 local GYP_REGEX=".*/build/Release/[^/]*.node"
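# This matches the compiled addon (.node) files that node-gyp places under
# a build/Release directory.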
286
287 # Remove any node-gyp directory in ${D} to remove temporary build files
288 for GYP_D_FILE in $(find ${D} -regex "${GYP_REGEX}")
289 do
290 local GYP_D_DIR=${GYP_D_FILE%/Release/*}
291
292 rm --recursive --force ${GYP_D_DIR}
293 done
294
295 # Copy only the node-gyp release files
296 for GYP_B_FILE in $(find ${NPM_BUILD} -regex "${GYP_REGEX}")
297 do
298 local GYP_D_FILE=${D}/${prefix}/${GYP_B_FILE#${NPM_BUILD}}
299
300 install -d ${GYP_D_FILE%/*}
301 install -m 755 ${GYP_B_FILE} ${GYP_D_FILE}
302 done
303
304 # Remove the shrinkwrap files, which do not need to be packaged
305 rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json
306 rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json
307
308 # node(1) uses /usr/lib/node as its default include directory and npm(1)
309 # uses /usr/lib/node_modules as its install directory. Let's make both happy.
310 ln -fs node_modules ${D}/${nonarch_libdir}/node
311}
312
313FILES_${PN} += " \
314 ${bindir} \
315 ${nonarch_libdir} \
316"
317
318EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
index 2589d34059..458a25ecc3 100644
--- a/meta/classes/oelint.bbclass
+++ b/meta/classes/oelint.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7addtask lint before do_build
8do_lint[nostamp] = "1"
9python do_lint() {
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index a777835138..36c7f8e3f3 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -1,13 +1,22 @@
-PREMIRRORS_prepend = " \
-cvs://.*/.* ${SOURCE_MIRROR_URL} \n \
-svn://.*/.* ${SOURCE_MIRROR_URL} \n \
-git://.*/.* ${SOURCE_MIRROR_URL} \n \
-gitsm://.*/.* ${SOURCE_MIRROR_URL} \n \
-hg://.*/.* ${SOURCE_MIRROR_URL} \n \
-bzr://.*/.* ${SOURCE_MIRROR_URL} \n \
-p4://.*/.* ${SOURCE_MIRROR_URL} \n \
-osc://.*/.* ${SOURCE_MIRROR_URL} \n \
-https?$://.*/.* ${SOURCE_MIRROR_URL} \n \
-ftp://.*/.* ${SOURCE_MIRROR_URL} \n \
-npm://.*/?.* ${SOURCE_MIRROR_URL} \n \
-"
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+PREMIRRORS:prepend = " \
+cvs://.*/.* ${SOURCE_MIRROR_URL} \
+svn://.*/.* ${SOURCE_MIRROR_URL} \
+git://.*/.* ${SOURCE_MIRROR_URL} \
+gitsm://.*/.* ${SOURCE_MIRROR_URL} \
+hg://.*/.* ${SOURCE_MIRROR_URL} \
+bzr://.*/.* ${SOURCE_MIRROR_URL} \
+p4://.*/.* ${SOURCE_MIRROR_URL} \
+osc://.*/.* ${SOURCE_MIRROR_URL} \
+https?://.*/.* ${SOURCE_MIRROR_URL} \
+ftp://.*/.* ${SOURCE_MIRROR_URL} \
+npm://.*/?.* ${SOURCE_MIRROR_URL} \
+s3://.*/.* ${SOURCE_MIRROR_URL} \
+crate://.*/.* ${SOURCE_MIRROR_URL} \
+gs://.*/.* ${SOURCE_MIRROR_URL} \
+"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
deleted file mode 100644
index e3f0a7060b..0000000000
--- a/meta/classes/package.bbclass
+++ /dev/null
@@ -1,2488 +0,0 @@
1#
2# Packaging process
3#
4# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
5# taking D and splitting it up into the packages listed in PACKAGES, placing the
6# resulting output in PKGDEST.
7#
8# There are the following default steps but PACKAGEFUNCS can be extended:
9#
10# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
11#
12# b) perform_packagecopy - Copy D into PKGD
13#
14# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
15#
16# d) split_and_strip_files - split the files into runtime and debug and strip them.
17# Debug files include debug info split, and associated sources that end up in -dbg packages
18#
19# e) fixup_perms - Fix up permissions in the package before we split it.
20#
21# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
22# Also triggers the binary stripping code to put files in -dbg packages.
23#
24# g) package_do_filedeps - Collect perfile run-time dependency metadata
25# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
26# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
27#
28# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
29# dependencies found. Also stores the package name so anyone else using this library
30# knows which package to depend on.
31#
32# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
33#
34# j) read_shlibdeps - Reads the stored shlibs information into the metadata
35#
36# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
37#
38# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
39# packaging steps
40
41inherit packagedata
42inherit chrpath
43inherit package_pkgdata
44
45# Need the package_qa_handle_error() in insane.bbclass
46inherit insane
47
48PKGD = "${WORKDIR}/package"
49PKGDEST = "${WORKDIR}/packages-split"
50
51LOCALE_SECTION ?= ''
52
53ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
54
55# rpm is used for the per-file dependency identification
56# dwarfsrcfiles is used to determine the list of debug source files
57PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
58
59
60# If your postinstall can execute at rootfs creation time rather than on
61# target but depends on a native/cross tool in order to execute, you need to
62# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
63# in the package dependencies as normal; this is just for native/cross support
64# tools at rootfs build time.
65PACKAGE_WRITE_DEPS ??= ""
66
67def legitimize_package_name(s):
68 """
69 Make sure package names are legitimate strings
70 """
71 import re
72
73 def fixutf(m):
74 cp = m.group(1)
75 if cp:
76 return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
77
78 # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
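# e.g. "<U00E9>" becomes "é" before the name is lower-cased below.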
79 s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
80
81 # Remaining package name validity fixes
82 return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
83
84def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
85 """
86 Used in .bb files to split up dynamically generated subpackages of a
87 given package, usually plugins or modules.
88
89 Arguments:
90 root -- the path in which to search
91 file_regex -- regular expression to match searched files. Use
92 parentheses () to mark the part of this expression
93 that should be used to derive the module name (to be
94 substituted where %s is used in other function
95 arguments as noted below)
96 output_pattern -- pattern to use for the package names. Must include %s.
97 description -- description to set for each package. Must include %s.
98 postinst -- postinstall script to use for all packages (as a
99 string)
100 recursive -- True to perform a recursive search - default False
101 hook -- a hook function to be called for every match. The
102 function will be called with the following arguments
103 (in the order listed):
104 f: full path to the file/directory match
105 pkg: the package name
106 file_regex: as above
107 output_pattern: as above
108 modulename: the module name derived using file_regex
109 extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
110 all packages. The default value of None causes a
111 dependency on the main package (${PN}) - if you do
112 not want this, pass '' for this parameter.
113 aux_files_pattern -- extra item(s) to be added to FILES for each
114 package. Can be a single string item or a list of
115 strings for multiple items. Must include %s.
116 postrm -- postrm script to use for all packages (as a string)
117 allow_dirs -- True to allow directories to be matched - default False
118 prepend -- if True, prepend created packages to PACKAGES instead
119 of the default False which appends them
120 match_path -- match file_regex on the whole relative path to the
121 root rather than just the file name
122 aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
123 each package, using the actual derived module name
124 rather than converting it to something legal for a
125 package name. Can be a single string item or a list
126 of strings for multiple items. Must include %s.
127 allow_links -- True to allow symlinks to be matched - default False
128 summary -- Summary to set for each package. Must include %s;
129 defaults to description if not set.
130
131 """
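# A minimal usage sketch (the recipe paths and names below are purely illustrative):
#   do_split_packages(d, root=d.expand('${libdir}/myapp/plugins'),
#                     file_regex=r'^(.*)\.so$', output_pattern='myapp-plugin-%s',
#                     description='MyApp plugin for %s')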
132
133 dvar = d.getVar('PKGD')
134 root = d.expand(root)
135 output_pattern = d.expand(output_pattern)
136 extra_depends = d.expand(extra_depends)
137
138 # If the root directory doesn't exist, don't error out later but silently do
139 # no splitting.
140 if not os.path.exists(dvar + root):
141 return []
142
143 ml = d.getVar("MLPREFIX")
144 if ml:
145 if not output_pattern.startswith(ml):
146 output_pattern = ml + output_pattern
147
148 newdeps = []
149 for dep in (extra_depends or "").split():
150 if dep.startswith(ml):
151 newdeps.append(dep)
152 else:
153 newdeps.append(ml + dep)
154 if newdeps:
155 extra_depends = " ".join(newdeps)
156
157
158 packages = d.getVar('PACKAGES').split()
159 split_packages = set()
160
161 if postinst:
162 postinst = '#!/bin/sh\n' + postinst + '\n'
163 if postrm:
164 postrm = '#!/bin/sh\n' + postrm + '\n'
165 if not recursive:
166 objs = os.listdir(dvar + root)
167 else:
168 objs = []
169 for walkroot, dirs, files in os.walk(dvar + root):
170 for file in files:
171 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
172 if relpath:
173 objs.append(relpath)
174
175 if extra_depends == None:
176 extra_depends = d.getVar("PN")
177
178 if not summary:
179 summary = description
180
181 for o in sorted(objs):
182 import re, stat
183 if match_path:
184 m = re.match(file_regex, o)
185 else:
186 m = re.match(file_regex, os.path.basename(o))
187
188 if not m:
189 continue
190 f = os.path.join(dvar + root, o)
191 mode = os.lstat(f).st_mode
192 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
193 continue
194 on = legitimize_package_name(m.group(1))
195 pkg = output_pattern % on
196 split_packages.add(pkg)
197 if not pkg in packages:
198 if prepend:
199 packages = [pkg] + packages
200 else:
201 packages.append(pkg)
202 oldfiles = d.getVar('FILES_' + pkg)
203 newfile = os.path.join(root, o)
204 # These names will be passed through glob() so if the filename actually
205 # contains * or ? (rare, but possible) we need to handle that specially
206 newfile = newfile.replace('*', '[*]')
207 newfile = newfile.replace('?', '[?]')
208 if not oldfiles:
209 the_files = [newfile]
210 if aux_files_pattern:
211 if type(aux_files_pattern) is list:
212 for fp in aux_files_pattern:
213 the_files.append(fp % on)
214 else:
215 the_files.append(aux_files_pattern % on)
216 if aux_files_pattern_verbatim:
217 if type(aux_files_pattern_verbatim) is list:
218 for fp in aux_files_pattern_verbatim:
219 the_files.append(fp % m.group(1))
220 else:
221 the_files.append(aux_files_pattern_verbatim % m.group(1))
222 d.setVar('FILES_' + pkg, " ".join(the_files))
223 else:
224 d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
225 if extra_depends != '':
226 d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
227 if not d.getVar('DESCRIPTION_' + pkg):
228 d.setVar('DESCRIPTION_' + pkg, description % on)
229 if not d.getVar('SUMMARY_' + pkg):
230 d.setVar('SUMMARY_' + pkg, summary % on)
231 if postinst:
232 d.setVar('pkg_postinst_' + pkg, postinst)
233 if postrm:
234 d.setVar('pkg_postrm_' + pkg, postrm)
235 if callable(hook):
236 hook(f, pkg, file_regex, output_pattern, m.group(1))
237
238 d.setVar('PACKAGES', ' '.join(packages))
239 return list(split_packages)
240
241PACKAGE_DEPENDS += "file-native"
242
243python () {
244 if d.getVar('PACKAGES') != '':
245 deps = ""
246 for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
247 deps += " %s:do_populate_sysroot" % dep
248 if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
249 deps += ' xz-native:do_populate_sysroot'
250 d.appendVarFlag('do_package', 'depends', deps)
251
252 # shlibs requires any DEPENDS to have already packaged for the *.list files
253 d.appendVarFlag('do_package', 'deptask', " do_packagedata")
254}
255
256# Get a list of files from file vars by searching for files under the current working directory.
257# The list contains symlinks, directories and normal files.
258def files_from_filevars(filevars):
259 import os,glob
260 cpath = oe.cachedpath.CachedPath()
261 files = []
262 for f in filevars:
263 if os.path.isabs(f):
264 f = '.' + f
265 if not f.startswith("./"):
266 f = './' + f
267 globbed = glob.glob(f)
268 if globbed:
269 if [ f ] != globbed:
270 files += globbed
271 continue
272 files.append(f)
273
274 symlink_paths = []
275 for ind, f in enumerate(files):
276 # Handle directory symlinks. Truncate path to the lowest level symlink
277 parent = ''
278 for dirname in f.split('/')[:-1]:
279 parent = os.path.join(parent, dirname)
280 if dirname == '.':
281 continue
282 if cpath.islink(parent):
283 bb.warn("FILES contains file '%s' which resides under a "
284 "directory symlink. Please fix the recipe and use the "
285 "real path for the file." % f[1:])
286 symlink_paths.append(f)
287 files[ind] = parent
288 f = parent
289 break
290
291 if not cpath.islink(f):
292 if cpath.isdir(f):
293 newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
294 if newfiles:
295 files += newfiles
296
297 return files, symlink_paths
298
299# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
300def get_conffiles(pkg, d):
301 pkgdest = d.getVar('PKGDEST')
302 root = os.path.join(pkgdest, pkg)
303 cwd = os.getcwd()
304 os.chdir(root)
305
306 conffiles = d.getVar('CONFFILES_%s' % pkg);
307 if conffiles == None:
308 conffiles = d.getVar('CONFFILES')
309 if conffiles == None:
310 conffiles = ""
311 conffiles = conffiles.split()
312 conf_orig_list = files_from_filevars(conffiles)[0]
313
314 # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
315 conf_list = []
316 for f in conf_orig_list:
317 if os.path.isdir(f):
318 continue
319 if os.path.islink(f):
320 continue
321 if not os.path.exists(f):
322 continue
323 conf_list.append(f)
324
325 # Remove the leading './'
326 for i in range(0, len(conf_list)):
327 conf_list[i] = conf_list[i][1:]
328
329 os.chdir(cwd)
330 return conf_list
331
332def checkbuildpath(file, d):
333 tmpdir = d.getVar('TMPDIR')
334 with open(file) as f:
335 file_content = f.read()
336 if tmpdir in file_content:
337 return True
338
339 return False
340
341def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
342 debugfiles = {}
343
344 for line in dwarfsrcfiles_output.splitlines():
345 if line.startswith("\t"):
346 debugfiles[os.path.normpath(line.split()[0])] = ""
347
348 return debugfiles.keys()
349
350def source_info(file, d, fatal=True):
351 import subprocess
352
353 cmd = ["dwarfsrcfiles", file]
354 try:
355 output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
356 retval = 0
357 except subprocess.CalledProcessError as exc:
358 output = exc.output
359 retval = exc.returncode
360
361 # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
362 if retval != 0 and retval != 255:
363 msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
364 if fatal:
365 bb.fatal(msg)
366 bb.note(msg)
367
368 debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
369
370 return list(debugsources)
371
372def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
373 # Function to split a single file into two components, one is the stripped
374 # target system binary, the other contains any debugging information. The
375 # two files are linked to reference each other.
376 #
377 # return a mapping of files:debugsources
378
379 import stat
380 import subprocess
381
382 src = file[len(dvar):]
383 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
384 debugfile = dvar + dest
385 sources = []
386
387 # Split the file...
388 bb.utils.mkdirhier(os.path.dirname(debugfile))
389 #bb.note("Split %s -> %s" % (file, debugfile))
390 # Only store off the hard link reference if we successfully split!
391
392 dvar = d.getVar('PKGD')
393 objcopy = d.getVar("OBJCOPY")
394
395 # We ignore kernel modules, we don't generate debug info files.
396 if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
397 return (file, sources)
398
399 newmode = None
400 if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
401 origmode = os.stat(file)[stat.ST_MODE]
402 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
403 os.chmod(file, newmode)
404
405 # We need to extract the debug src information here...
406 if debugsrcdir:
407 sources = source_info(file, d)
408
409 bb.utils.mkdirhier(os.path.dirname(debugfile))
410
411 subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
412
413 # Set the debuglink to have the view of the file path on the target
414 subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
415
416 if newmode:
417 os.chmod(file, origmode)
418
419 return (file, sources)
420
421def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d):
422 # Unlike the function above, there is no way to split a static library
423 # into two components. So to get similar results we will copy the unmodified
424 # static library (containing the debug symbols) into a new directory.
425 # We will then strip (preserving symbols) the static library in the
426 # typical location.
427 #
428 # return a mapping of files:debugsources
429
430 import stat
431 import shutil
432
433 src = file[len(dvar):]
434 dest = debugstaticlibdir + os.path.dirname(src) + debugstaticdir + "/" + os.path.basename(src) + debugstaticappend
435 debugfile = dvar + dest
436 sources = []
437
438 # Copy the file...
439 bb.utils.mkdirhier(os.path.dirname(debugfile))
440 #bb.note("Copy %s -> %s" % (file, debugfile))
441
442 dvar = d.getVar('PKGD')
443
444 newmode = None
445 if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
446 origmode = os.stat(file)[stat.ST_MODE]
447 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
448 os.chmod(file, newmode)
449
450 # We need to extract the debug src information here...
451 if debugsrcdir:
452 sources = source_info(file, d)
453
454 bb.utils.mkdirhier(os.path.dirname(debugfile))
455
456 # Copy the unmodified item to the debug directory
457 shutil.copy2(file, debugfile)
458
459 if newmode:
460 os.chmod(file, origmode)
461
462 return (file, sources)
463
464def inject_minidebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
465 # Extract just the symbols from debuginfo into minidebuginfo,
466 # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
467 # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
468
469 import subprocess
470
471 readelf = d.getVar('READELF')
472 nm = d.getVar('NM')
473 objcopy = d.getVar('OBJCOPY')
474
475 minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
476
477 src = file[len(dvar):]
478 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
479 debugfile = dvar + dest
480 minidebugfile = minidebuginfodir + src + '.minidebug'
481 bb.utils.mkdirhier(os.path.dirname(minidebugfile))
482
483 # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
484 # so skip it.
485 if not os.path.exists(debugfile):
486 bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
487 return
488
489 # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
490 # We will exclude all of these from minidebuginfo to save space.
491 remove_section_names = []
492 for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
493 fields = line.split()
494 if len(fields) < 8:
495 continue
496 name = fields[0]
497 type = fields[1]
498 flags = fields[7]
499 # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
500 if name.startswith('.debug_'):
501 continue
502 if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
503 remove_section_names.append(name)
504
505 # List dynamic symbols in the binary. We can exclude these from minidebuginfo
506 # because they are always present in the binary.
507 dynsyms = set()
508 for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
509 dynsyms.add(line.split()[0])
510
511 # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
512 # These are the ones we want to keep in minidebuginfo.
513 keep_symbols_file = minidebugfile + '.symlist'
514 found_any_symbols = False
515 with open(keep_symbols_file, 'w') as f:
516 for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
517 fields = line.split('|')
518 if len(fields) < 7:
519 continue
520 name = fields[0].strip()
521 type = fields[3].strip()
522 if type == 'FUNC' and name not in dynsyms:
523 f.write('{}\n'.format(name))
524 found_any_symbols = True
525
526 if not found_any_symbols:
527 bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
528 return
529
530 bb.utils.remove(minidebugfile)
531 bb.utils.remove(minidebugfile + '.xz')
532
533 subprocess.check_call([objcopy, '-S'] +
534 ['--remove-section={}'.format(s) for s in remove_section_names] +
535 ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
536
537 subprocess.check_call(['xz', '--keep', minidebugfile])
538
539 subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
540
541def copydebugsources(debugsrcdir, sources, d):
542 # The debug src information written out to sourcefile is further processed
543 # and copied to the destination here.
544
545 import stat
546 import subprocess
547
548 if debugsrcdir and sources:
549 sourcefile = d.expand("${WORKDIR}/debugsources.list")
550 bb.utils.remove(sourcefile)
551
552 # filenames are null-separated - this is an artefact of the previous use
553 # of rpm's debugedit, which was writing them out that way, and the code elsewhere
554 # is still assuming that.
555 debuglistoutput = '\0'.join(sources) + '\0'
556 with open(sourcefile, 'a') as sf:
557 sf.write(debuglistoutput)
558
559 dvar = d.getVar('PKGD')
560 strip = d.getVar("STRIP")
561 objcopy = d.getVar("OBJCOPY")
562 workdir = d.getVar("WORKDIR")
563 workparentdir = os.path.dirname(os.path.dirname(workdir))
564 workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
565
566 # If the build path exists in sourcefile, it means the toolchain did not use
567 # -fdebug-prefix-map when compiling
568 if checkbuildpath(sourcefile, d):
569 localsrc_prefix = workparentdir + "/"
570 else:
571 localsrc_prefix = "/usr/src/debug/"
572
573 nosuchdir = []
574 basepath = dvar
575 for p in debugsrcdir.split("/"):
576 basepath = basepath + "/" + p
577 if not cpath.exists(basepath):
578 nosuchdir.append(basepath)
579 bb.utils.mkdirhier(basepath)
580 cpath.updatecache(basepath)
581
582 # Ignore files from the recipe sysroots (target and native)
583 processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | "
584 # We need to ignore files that are not actually ours
585 # we do this by only paying attention to items from this package
586 processdebugsrc += "fgrep -zw '%s' | "
587 # Remove prefix in the source paths
588 processdebugsrc += "sed 's#%s##g' | "
589 processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
590
591 cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
592 try:
593 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
594 except subprocess.CalledProcessError:
595 # Can "fail" if internal headers/transient sources are attempted
596 pass
597
598 # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
599 # Work around this by manually finding and copying any symbolic links that made it through.
600 cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
601 (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
602 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
603
604 # The copy by cpio may have resulted in some empty directories! Remove these
605 cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
606 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
607
608 # Also remove debugsrcdir if it's empty
609 for p in nosuchdir[::-1]:
610 if os.path.exists(p) and not os.listdir(p):
611 os.rmdir(p)
612
613#
614# Package data handling routines
615#
616
617def get_package_mapping (pkg, basepkg, d, depversions=None):
618 import oe.packagedata
619
620 data = oe.packagedata.read_subpkgdata(pkg, d)
621 key = "PKG_%s" % pkg
622
623 if key in data:
624 # Have to avoid undoing the write_extra_pkgs(global_variants...)
625 if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
626 and data[key] == basepkg:
627 return pkg
628 if depversions == []:
629 # Avoid returning a mapping if the renamed package rprovides its original name
630 rprovkey = "RPROVIDES_%s" % pkg
631 if rprovkey in data:
632 if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
633 bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
634 return pkg
635 # Do map to rewritten package name
636 return data[key]
637
638 return pkg
639
640def get_package_additional_metadata (pkg_type, d):
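# Collect PACKAGE_ADD_METADATA (or its backend-specific variant such as
# PACKAGE_ADD_METADATA_IPK) and return the fields joined by newlines.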
641 base_key = "PACKAGE_ADD_METADATA"
642 for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
643 if d.getVar(key, False) is None:
644 continue
645 d.setVarFlag(key, "type", "list")
646 if d.getVarFlag(key, "separator") is None:
647 d.setVarFlag(key, "separator", "\\n")
648 metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
649 return "\n".join(metadata_fields).strip()
650
651def runtime_mapping_rename (varname, pkg, d):
652 #bb.note("%s before: %s" % (varname, d.getVar(varname)))
653
654 new_depends = {}
655 deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
656 for depend, depversions in deps.items():
657 new_depend = get_package_mapping(depend, pkg, d, depversions)
658 if depend != new_depend:
659 bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
660 new_depends[new_depend] = deps[depend]
661
662 d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
663
664 #bb.note("%s after: %s" % (varname, d.getVar(varname)))
665
666#
667# Used by do_packagedata (and possibly other routines post do_package)
668#
669
670package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
671python package_get_auto_pr() {
672 import oe.prservice
673
674 def get_do_package_hash(pn):
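# Look up the do_package task unihash for this recipe in the task
# dependency data; it is used as the checksum for the PR service query.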
675 if d.getVar("BB_RUNTASK") != "do_package":
676 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
677 for dep in taskdepdata:
678 if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
679 return taskdepdata[dep][6]
680 return None
681
682 # Support a per-recipe PRSERV_HOST
683 pn = d.getVar('PN')
684 host = d.getVar("PRSERV_HOST_" + pn)
685 if not (host is None):
686 d.setVar("PRSERV_HOST", host)
687
688 pkgv = d.getVar("PKGV")
689
690 # PR Server not active, handle AUTOINC
691 if not d.getVar('PRSERV_HOST'):
692 d.setVar("PRSERV_PV_AUTOINC", "0")
693 return
694
695 auto_pr = None
696 pv = d.getVar("PV")
697 version = d.getVar("PRAUTOINX")
698 pkgarch = d.getVar("PACKAGE_ARCH")
699 checksum = get_do_package_hash(pn)
700
701 # If do_package isn't in the dependencies, we can't get the checksum...
702 if not checksum:
703 bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
704 #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
705 #for dep in taskdepdata:
706 # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
707 return
708
709 if d.getVar('PRSERV_LOCKDOWN'):
710 auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
711 if auto_pr is None:
712 bb.fatal("Can NOT get PRAUTO from lockdown exported file")
713 d.setVar('PRAUTO',str(auto_pr))
714 return
715
716 try:
717 conn = d.getVar("__PRSERV_CONN")
718 if conn is None:
719 conn = oe.prservice.prserv_make_conn(d)
720 if conn is not None:
721 if "AUTOINC" in pkgv:
722 srcpv = bb.fetch2.get_srcrev(d)
723 base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
724 value = conn.getPR(base_ver, pkgarch, srcpv)
725 d.setVar("PRSERV_PV_AUTOINC", str(value))
726
727 auto_pr = conn.getPR(version, pkgarch, checksum)
728 except Exception as e:
729 bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
730 if auto_pr is None:
731 bb.fatal("Can NOT get PRAUTO from remote PR service")
732 d.setVar('PRAUTO',str(auto_pr))
733}
734
735#
736# Package functions suitable for inclusion in PACKAGEFUNCS
737#
738
739python package_convert_pr_autoinc() {
740 pkgv = d.getVar("PKGV")
741
742 # Adjust pkgv as necessary...
743 if 'AUTOINC' in pkgv:
744 d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
745
746 # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
747 d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
748 d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
749}
750
751LOCALEBASEPN ??= "${PN}"
752
753python package_do_split_locales() {
754 if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
755 bb.debug(1, "package requested not splitting locales")
756 return
757
758 packages = (d.getVar('PACKAGES') or "").split()
759
760 datadir = d.getVar('datadir')
761 if not datadir:
762 bb.note("datadir not defined")
763 return
764
765 dvar = d.getVar('PKGD')
766 pn = d.getVar('LOCALEBASEPN')
767
768 if pn + '-locale' in packages:
769 packages.remove(pn + '-locale')
770
771 localedir = os.path.join(dvar + datadir, 'locale')
772
773 if not cpath.isdir(localedir):
774 bb.debug(1, "No locale files in this package")
775 return
776
777 locales = os.listdir(localedir)
778
779 summary = d.getVar('SUMMARY') or pn
780 description = d.getVar('DESCRIPTION') or ""
781 locale_section = d.getVar('LOCALE_SECTION')
782 mlprefix = d.getVar('MLPREFIX') or ""
783 for l in sorted(locales):
784 ln = legitimize_package_name(l)
785 pkg = pn + '-locale-' + ln
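# e.g. a "de_DE" locale directory becomes the "<LOCALEBASEPN>-locale-de-de" package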
786 packages.append(pkg)
787 d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
788 d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
789 d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
790 d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
791 d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
792 if locale_section:
793 d.setVar('SECTION_' + pkg, locale_section)
794
795 d.setVar('PACKAGES', ' '.join(packages))
796
797 # Disabled by RP 18/06/07
798 # Wildcards aren't supported in debian
799 # They break with ipkg since glibc-locale* will mean that
800 # glibc-localedata-translit* won't install as a dependency
801 # for some other package which breaks meta-toolchain
802 # Probably breaks since virtual-locale- isn't provided anywhere
803 #rdep = (d.getVar('RDEPENDS_%s' % pn) or "").split()
804 #rdep.append('%s-locale*' % pn)
805 #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
806}
807
808python perform_packagecopy () {
809 import subprocess
810 import shutil
811
812 dest = d.getVar('D')
813 dvar = d.getVar('PKGD')
814
815 # Remove ${D}/sysroot-only if present
816 sysroot_only = os.path.join(dest, 'sysroot-only')
817 if cpath.exists(sysroot_only) and cpath.isdir(sysroot_only):
818 shutil.rmtree(sysroot_only)
819
820 # Start the package population by taking a copy of the installed
821 # files to operate on
822 # Preserve sparse files and hard links
823 cmd = 'tar -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
824 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
825
826 # replace RPATHs for the nativesdk binaries, to make them relocatable
827 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
828 rpath_replace (dvar, d)
829}
830perform_packagecopy[cleandirs] = "${PKGD}"
831perform_packagecopy[dirs] = "${PKGD}"
832
833# We generate a master list of directories to process; we start by
834# seeding this list with reasonable defaults, then load from
835# the fs-perms.txt files
836python fixup_perms () {
837 import pwd, grp
838
839 # init using a string with the same format as a line as documented in
840 # the fs-perms.txt file
841 # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
842 # <path> link <link target>
843 #
844 # __str__ can be used to print out an entry in the input format
845 #
846 # if fs_perms_entry.path is None:
847 # an error occurred
848 # if fs_perms_entry.link, you can retrieve:
849 # fs_perms_entry.path = path
850 # fs_perms_entry.link = target of link
851 # if not fs_perms_entry.link, you can retrieve:
852 # fs_perms_entry.path = path
853 # fs_perms_entry.mode = expected dir mode or None
854 # fs_perms_entry.uid = expected uid or -1
855 # fs_perms_entry.gid = expected gid or -1
856 # fs_perms_entry.walk = 'true' or something else
857 # fs_perms_entry.fmode = expected file mode or None
858 # fs_perms_entry.fuid = expected file uid or -1
859 # fs_perms_entry_fgid = expected file gid or -1
860 class fs_perms_entry():
861 def __init__(self, line):
862 lsplit = line.split()
863 if len(lsplit) == 3 and lsplit[1].lower() == "link":
864 self._setlink(lsplit[0], lsplit[2])
865 elif len(lsplit) == 8:
866 self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
867 else:
868 msg = "Fixup Perms: invalid config line %s" % line
869 package_qa_handle_error("perm-config", msg, d)
870 self.path = None
871 self.link = None
872
873 def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
874 self.path = os.path.normpath(path)
875 self.link = None
876 self.mode = self._procmode(mode)
877 self.uid = self._procuid(uid)
878 self.gid = self._procgid(gid)
879 self.walk = walk.lower()
880 self.fmode = self._procmode(fmode)
881 self.fuid = self._procuid(fuid)
882 self.fgid = self._procgid(fgid)
883
884 def _setlink(self, path, link):
885 self.path = os.path.normpath(path)
886 self.link = link
887
888 def _procmode(self, mode):
889 if not mode or (mode and mode == "-"):
890 return None
891 else:
892 return int(mode,8)
893
894 # Note uid/gid -1 has special significance in os.lchown
895 def _procuid(self, uid):
896 if uid is None or uid == "-":
897 return -1
898 elif uid.isdigit():
899 return int(uid)
900 else:
901 return pwd.getpwnam(uid).pw_uid
902
903 def _procgid(self, gid):
904 if gid is None or gid == "-":
905 return -1
906 elif gid.isdigit():
907 return int(gid)
908 else:
909 return grp.getgrnam(gid).gr_gid
910
911 # Use for debugging the entries
912 def __str__(self):
913 if self.link:
914 return "%s link %s" % (self.path, self.link)
915 else:
916 mode = "-"
917 if self.mode:
918 mode = "0%o" % self.mode
919 fmode = "-"
920 if self.fmode:
921 fmode = "0%o" % self.fmode
922 uid = self._mapugid(self.uid)
923 gid = self._mapugid(self.gid)
924 fuid = self._mapugid(self.fuid)
925 fgid = self._mapugid(self.fgid)
926 return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
927
928 def _mapugid(self, id):
929 if id is None or id == -1:
930 return "-"
931 else:
932 return "%d" % id
933
934 # Fix the permission, owner and group of path
935 def fix_perms(path, mode, uid, gid, dir):
936 if mode and not os.path.islink(path):
937 #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
938 os.chmod(path, mode)
939 # -1 is a special value that means don't change the uid/gid
940 # if they are BOTH -1, don't bother to lchown
941 if not (uid == -1 and gid == -1):
942 #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
943 os.lchown(path, uid, gid)
944
945 # Return a list of configuration files based on either the default
946 # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES;
947 # paths are resolved via BBPATH
948 def get_fs_perms_list(d):
949 str = ""
950 bbpath = d.getVar('BBPATH')
951 fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
952 for conf_file in fs_perms_tables.split():
953 confpath = bb.utils.which(bbpath, conf_file)
954 if confpath:
955 str += " %s" % bb.utils.which(bbpath, conf_file)
956 else:
957 bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
958 return str
959
960
961
962 dvar = d.getVar('PKGD')
963
964 fs_perms_table = {}
965 fs_link_table = {}
966
967 # By default all of the standard directories specified in
968 # bitbake.conf will get 0755 root:root.
969 target_path_vars = [ 'base_prefix',
970 'prefix',
971 'exec_prefix',
972 'base_bindir',
973 'base_sbindir',
974 'base_libdir',
975 'datadir',
976 'sysconfdir',
977 'servicedir',
978 'sharedstatedir',
979 'localstatedir',
980 'infodir',
981 'mandir',
982 'docdir',
983 'bindir',
984 'sbindir',
985 'libexecdir',
986 'libdir',
987 'includedir',
988 'oldincludedir' ]
989
990 for path in target_path_vars:
991 dir = d.getVar(path) or ""
992 if dir == "":
993 continue
994 fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
995
996 # Now we actually load from the configuration files
997 for conf in get_fs_perms_list(d).split():
998 if not os.path.exists(conf):
999 continue
1000 with open(conf) as f:
1001 for line in f:
1002 if line.startswith('#'):
1003 continue
1004 lsplit = line.split()
1005 if len(lsplit) == 0:
1006 continue
1007 if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
1008 msg = "Fixup perms: %s invalid line: %s" % (conf, line)
1009 package_qa_handle_error("perm-line", msg, d)
1010 continue
1011 entry = fs_perms_entry(d.expand(line))
1012 if entry and entry.path:
1013 if entry.link:
1014 fs_link_table[entry.path] = entry
1015 if entry.path in fs_perms_table:
1016 fs_perms_table.pop(entry.path)
1017 else:
1018 fs_perms_table[entry.path] = entry
1019 if entry.path in fs_link_table:
1020 fs_link_table.pop(entry.path)
1021
1022 # Debug -- list out in-memory table
1023 #for dir in fs_perms_table:
1024 # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
1025 #for link in fs_link_table:
1026 # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
1027
1028 # We process links first, so we can go back and fixup directory ownership
1029 # for any newly created directories
1030 # Process in sorted order so /run gets created before /run/lock, etc.
1031 for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
1032 link = entry.link
1033 dir = entry.path
1034 origin = dvar + dir
1035 if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
1036 continue
1037
1038 if link[0] == "/":
1039 target = dvar + link
1040 ptarget = link
1041 else:
1042 target = os.path.join(os.path.dirname(origin), link)
1043 ptarget = os.path.join(os.path.dirname(dir), link)
1044 if os.path.exists(target):
1045 msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
1046 package_qa_handle_error("perm-link", msg, d)
1047 continue
1048
1049 # Create path to move directory to, move it, and then setup the symlink
1050 bb.utils.mkdirhier(os.path.dirname(target))
1051 #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
1052 os.rename(origin, target)
1053 #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
1054 os.symlink(link, origin)
1055
1056 for dir in fs_perms_table:
1057 origin = dvar + dir
1058 if not (cpath.exists(origin) and cpath.isdir(origin)):
1059 continue
1060
1061 fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
1062
1063 if fs_perms_table[dir].walk == 'true':
1064 for root, dirs, files in os.walk(origin):
1065 for dr in dirs:
1066 each_dir = os.path.join(root, dr)
1067 fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
1068 for f in files:
1069 each_file = os.path.join(root, f)
1070 fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
1071}
1072
1073python split_and_strip_files () {
1074 import stat, errno
1075 import subprocess
1076
1077 dvar = d.getVar('PKGD')
1078 pn = d.getVar('PN')
1079 hostos = d.getVar('HOST_OS')
1080
1081 oldcwd = os.getcwd()
1082 os.chdir(dvar)
1083
1084 # We default to '.debug' style
1085 if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
1086 # Single debug-file-directory style debug info
1087 debugappend = ".debug"
1088 debugstaticappend = ""
1089 debugdir = ""
1090 debugstaticdir = ""
1091 debuglibdir = "/usr/lib/debug"
1092 debugstaticlibdir = "/usr/lib/debug-static"
1093 debugsrcdir = "/usr/src/debug"
1094 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
1095 # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
1096 debugappend = ""
1097 debugstaticappend = ""
1098 debugdir = "/.debug"
1099 debugstaticdir = "/.debug-static"
1100 debuglibdir = ""
1101 debugstaticlibdir = ""
1102 debugsrcdir = ""
1103 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
1104 debugappend = ""
1105 debugstaticappend = ""
1106 debugdir = "/.debug"
1107 debugstaticdir = "/.debug-static"
1108 debuglibdir = ""
1109 debugstaticlibdir = ""
1110 debugsrcdir = "/usr/src/debug"
1111 else:
1112 # Original OE-core, a.k.a. ".debug", style debug info
1113 debugappend = ""
1114 debugstaticappend = ""
1115 debugdir = "/.debug"
1116 debugstaticdir = "/.debug-static"
1117 debuglibdir = ""
1118 debugstaticlibdir = ""
1119 debugsrcdir = "/usr/src/debug"
1120
1121 #
1122 # First let's figure out all of the files we may have to process ... do this only once!
1123 #
1124 elffiles = {}
1125 symlinks = {}
1126 kernmods = []
1127 staticlibs = []
1128 inodes = {}
1129 libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
1130 baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
1131 skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
1132 if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
1133 d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
1134 checkelf = {}
1135 checkelflinks = {}
1136 for root, dirs, files in cpath.walk(dvar):
1137 for f in files:
1138 file = os.path.join(root, f)
1139
1140 # Skip debug files
1141 if debugappend and file.endswith(debugappend):
1142 continue
1143 if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
1144 continue
1145
1146 if file in skipfiles:
1147 continue
1148
1149 if file.endswith(".ko") and file.find("/lib/modules/") != -1:
1150 kernmods.append(file)
1151 continue
1152 if oe.package.is_static_lib(file):
1153 staticlibs.append(file)
1154 continue
1155
1156 try:
1157 ltarget = cpath.realpath(file, dvar, False)
1158 s = cpath.lstat(ltarget)
1159 except OSError as e:
1160 (err, strerror) = e.args
1161 if err != errno.ENOENT:
1162 raise
1163 # Skip broken symlinks
1164 continue
1165 if not s:
1166 continue
1167 # Check it's an executable
1168 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
1169 or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
1170
1171 if cpath.islink(file):
1172 checkelflinks[file] = ltarget
1173 continue
1174 # Use a reference of device ID and inode number to identify files
1175 file_reference = "%d_%d" % (s.st_dev, s.st_ino)
1176 checkelf[file] = (file, file_reference)
1177
1178 results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
1179 results_map = {}
1180 for (ltarget, elf_file) in results:
1181 results_map[ltarget] = elf_file
1182 for file in checkelflinks:
1183 ltarget = checkelflinks[file]
1184 # If it's a symlink, and points to an ELF file, we capture the readlink target
1185 if results_map[ltarget]:
1186 target = os.readlink(file)
1187 #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
1188 symlinks[file] = target
1189
1190 results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
1191
1192 # Sort results by file path. This ensures that the files are always
1193 # processed in the same order, which is important to make sure builds
1194 # are reproducible when dealing with hardlinks
1195 results.sort(key=lambda x: x[0])
1196
1197 for (file, elf_file) in results:
1198 # It's a file (or hardlink), not a link
1199 # ...but is it ELF, and is it already stripped?
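# is_elf() returns a bit mask: 1 means the file is ELF, 2 means it already
# appears to be stripped.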
1200 if elf_file & 1:
1201 if elf_file & 2:
1202 if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
1203 bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
1204 else:
1205 msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
1206 package_qa_handle_error("already-stripped", msg, d)
1207 continue
1208
1209 # At this point we have an unstripped elf file. We need to:
1210 # a) Make sure any file we strip is not hardlinked to anything else outside this tree
1211 # b) Only strip any hardlinked file once (no races)
1212 # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
1213
1214 # Use a reference of device ID and inode number to identify files
1215 file_reference = checkelf[file][1]
1216 if file_reference in inodes:
1217 os.unlink(file)
1218 os.link(inodes[file_reference][0], file)
1219 inodes[file_reference].append(file)
1220 else:
1221 inodes[file_reference] = [file]
1222 # break hardlink
1223 bb.utils.break_hardlinks(file)
1224 elffiles[file] = elf_file
1225 # Modified the file so clear the cache
1226 cpath.updatecache(file)
1227
1228 #
1229 # First let's process debug splitting
1230 #
1231 if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
1232 results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
1233
1234 if debugsrcdir and not hostos.startswith("mingw"):
1235 if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
1236 results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d))
1237 else:
1238 for file in staticlibs:
1239 results.append( (file,source_info(file, d)) )
1240
1241 sources = set()
1242 for r in results:
1243 sources.update(r[1])
1244
1245 # Hardlink our debug symbols to the other hardlink copies
1246 for ref in inodes:
1247 if len(inodes[ref]) == 1:
1248 continue
1249
1250 target = inodes[ref][0][len(dvar):]
1251 for file in inodes[ref][1:]:
1252 src = file[len(dvar):]
1253 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(target) + debugappend
1254 fpath = dvar + dest
1255 ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
1256 bb.utils.mkdirhier(os.path.dirname(fpath))
1257 # Only one hardlink of separated debug info file in each directory
1258 if not os.access(fpath, os.R_OK):
1259 #bb.note("Link %s -> %s" % (fpath, ftarget))
1260 os.link(ftarget, fpath)
1261
1262 # Create symlinks for all cases we were able to split symbols
1263 for file in symlinks:
1264 src = file[len(dvar):]
1265 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
1266 fpath = dvar + dest
1267 # Skip it if the target doesn't exist
1268 try:
1269 s = os.stat(fpath)
1270 except OSError as e:
1271 (err, strerror) = e.args
1272 if err != errno.ENOENT:
1273 raise
1274 continue
1275
1276 ltarget = symlinks[file]
1277 lpath = os.path.dirname(ltarget)
1278 lbase = os.path.basename(ltarget)
1279 ftarget = ""
1280 if lpath and lpath != ".":
1281 ftarget += lpath + debugdir + "/"
1282 ftarget += lbase + debugappend
1283 if lpath.startswith(".."):
1284 ftarget = os.path.join("..", ftarget)
1285 bb.utils.mkdirhier(os.path.dirname(fpath))
1286 #bb.note("Symlink %s -> %s" % (fpath, ftarget))
1287 os.symlink(ftarget, fpath)
1288
1289 # Process the debugsrcdir if requested...
1290 # This copies and places the referenced sources for later debugging...
1291 copydebugsources(debugsrcdir, sources, d)
1292 #
1293 # End of debug splitting
1294 #
1295
1296 #
1297 # Now let's go back over things and strip them
1298 #
1299 if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
1300 strip = d.getVar("STRIP")
1301 sfiles = []
1302 for file in elffiles:
1303 elf_file = int(elffiles[file])
1304 #bb.note("Strip %s" % file)
1305 sfiles.append((file, elf_file, strip))
1306 for f in kernmods:
1307 sfiles.append((f, 16, strip))
1308 if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
1309 for f in staticlibs:
1310 sfiles.append((f, 16, strip))
1311
1312 oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
1313
1314 # Build "minidebuginfo" and reinject it back into the stripped binaries
1315 if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
1316 oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
1317 extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
1318
1319 #
1320 # End of strip
1321 #
1322 os.chdir(oldcwd)
1323}
1324
1325python populate_packages () {
1326 import glob, re
1327
1328 workdir = d.getVar('WORKDIR')
1329 outdir = d.getVar('DEPLOY_DIR')
1330 dvar = d.getVar('PKGD')
1331 packages = d.getVar('PACKAGES').split()
1332 pn = d.getVar('PN')
1333
1334 bb.utils.mkdirhier(outdir)
1335 os.chdir(dvar)
1336
1337 autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
1338
1339 split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
1340
1341 # If debug-with-srcpkg mode is enabled then add the source package if it
1342 # doesn't exist and add the source file contents to the source package.
1343 if split_source_package:
1344 src_package_name = ('%s-src' % d.getVar('PN'))
1345 if not src_package_name in packages:
1346 packages.append(src_package_name)
1347 d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
1348
1349 # Sanity check PACKAGES for duplicates
1350 # Sanity should be moved to sanity.bbclass once we have the infrastructure
1351 package_dict = {}
1352
1353 for i, pkg in enumerate(packages):
1354 if pkg in package_dict:
1355 msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
1356 package_qa_handle_error("packages-list", msg, d)
1357 # Ensure the source package gets the chance to pick up the source files
1358 # before the debug package by ordering it first in PACKAGES. Whether it
1359 # actually picks up any source files is controlled by
1360 # PACKAGE_DEBUG_SPLIT_STYLE.
1361 elif pkg.endswith("-src"):
1362 package_dict[pkg] = (10, i)
1363 elif autodebug and pkg.endswith("-dbg"):
1364 package_dict[pkg] = (30, i)
1365 else:
1366 package_dict[pkg] = (50, i)
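# Lower weights sort first: -src (10) before -dbg (30) before everything
# else (50); packages with the same weight keep their original order.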
1367 packages = sorted(package_dict.keys(), key=package_dict.get)
1368 d.setVar('PACKAGES', ' '.join(packages))
1369 pkgdest = d.getVar('PKGDEST')
1370
1371 seen = []
1372
1373 # os.mkdir masks the permissions with umask so we have to unset it first
1374 oldumask = os.umask(0)
1375
1376 debug = []
1377 for root, dirs, files in cpath.walk(dvar):
1378 dir = root[len(dvar):]
1379 if not dir:
1380 dir = os.sep
1381 for f in (files + dirs):
1382 path = "." + os.path.join(dir, f)
1383 if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
1384 debug.append(path)
1385
1386 for pkg in packages:
1387 root = os.path.join(pkgdest, pkg)
1388 bb.utils.mkdirhier(root)
1389
1390 filesvar = d.getVar('FILES_%s' % pkg) or ""
1391 if "//" in filesvar:
1392 msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
1393 package_qa_handle_error("files-invalid", msg, d)
1394            filesvar = filesvar.replace("//", "/")
1395
1396 origfiles = filesvar.split()
1397 files, symlink_paths = files_from_filevars(origfiles)
1398
1399 if autodebug and pkg.endswith("-dbg"):
1400 files.extend(debug)
1401
1402 for file in files:
1403 if (not cpath.islink(file)) and (not cpath.exists(file)):
1404 continue
1405 if file in seen:
1406 continue
1407 seen.append(file)
1408
1409 def mkdir(src, dest, p):
1410 src = os.path.join(src, p)
1411 dest = os.path.join(dest, p)
1412 fstat = cpath.stat(src)
1413 os.mkdir(dest)
1414 os.chmod(dest, fstat.st_mode)
1415 os.chown(dest, fstat.st_uid, fstat.st_gid)
1416 if p not in seen:
1417 seen.append(p)
1418 cpath.updatecache(dest)
1419
1420 def mkdir_recurse(src, dest, paths):
1421 if cpath.exists(dest + '/' + paths):
1422 return
1423 while paths.startswith("./"):
1424 paths = paths[2:]
1425 p = "."
1426 for c in paths.split("/"):
1427 p = os.path.join(p, c)
1428 if not cpath.exists(os.path.join(dest, p)):
1429 mkdir(src, dest, p)
1430
1431 if cpath.isdir(file) and not cpath.islink(file):
1432 mkdir_recurse(dvar, root, file)
1433 continue
1434
1435 mkdir_recurse(dvar, root, os.path.dirname(file))
1436 fpath = os.path.join(root,file)
1437 if not cpath.islink(file):
1438 os.link(file, fpath)
1439 continue
1440 ret = bb.utils.copyfile(file, fpath)
1441 if ret is False or ret == 0:
1442 bb.fatal("File population failed")
1443
1444 # Check if symlink paths exist
1445 for file in symlink_paths:
1446 if not os.path.exists(os.path.join(root,file)):
1447 bb.fatal("File '%s' cannot be packaged into '%s' because its "
1448 "parent directory structure does not exist. One of "
1449 "its parent directories is a symlink whose target "
1450 "directory is not included in the package." %
1451 (file, pkg))
1452
1453 os.umask(oldumask)
1454 os.chdir(workdir)
1455
1456 # Handle LICENSE_EXCLUSION
1457 package_list = []
1458 for pkg in packages:
1459 licenses = d.getVar('LICENSE_EXCLUSION-' + pkg)
1460 if licenses:
1461 msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
1462 package_qa_handle_error("incompatible-license", msg, d)
1463 else:
1464 package_list.append(pkg)
1465 d.setVar('PACKAGES', ' '.join(package_list))
1466
1467 unshipped = []
1468 for root, dirs, files in cpath.walk(dvar):
1469 dir = root[len(dvar):]
1470 if not dir:
1471 dir = os.sep
1472 for f in (files + dirs):
1473 path = os.path.join(dir, f)
1474 if ('.' + path) not in seen:
1475 unshipped.append(path)
1476
1477 if unshipped != []:
1478 msg = pn + ": Files/directories were installed but not shipped in any package:"
1479 if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn) or "").split():
1480 bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
1481 else:
1482 for f in unshipped:
1483 msg = msg + "\n " + f
1484 msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
1485 msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
1486 package_qa_handle_error("installed-vs-shipped", msg, d)
1487}
1488populate_packages[dirs] = "${D}"
1489
1490python package_fixsymlinks () {
1491 import errno
1492 pkgdest = d.getVar('PKGDEST')
1493 packages = d.getVar("PACKAGES", False).split()
1494
1495 dangling_links = {}
1496 pkg_files = {}
1497 for pkg in packages:
1498 dangling_links[pkg] = []
1499 pkg_files[pkg] = []
1500 inst_root = os.path.join(pkgdest, pkg)
1501 for path in pkgfiles[pkg]:
1502 rpath = path[len(inst_root):]
1503 pkg_files[pkg].append(rpath)
1504 rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
1505 if not cpath.lexists(rtarget):
1506 dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
1507
1508 newrdepends = {}
1509 for pkg in dangling_links:
1510 for l in dangling_links[pkg]:
1511 found = False
1512 bb.debug(1, "%s contains dangling link %s" % (pkg, l))
1513 for p in packages:
1514 if l in pkg_files[p]:
1515 found = True
1516 bb.debug(1, "target found in %s" % p)
1517 if p == pkg:
1518 break
1519 if pkg not in newrdepends:
1520 newrdepends[pkg] = []
1521 newrdepends[pkg].append(p)
1522 break
1523 if found == False:
1524 bb.note("%s contains dangling symlink to %s" % (pkg, l))
1525
1526 for pkg in newrdepends:
1527 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
1528 for p in newrdepends[pkg]:
1529 if p not in rdepends:
1530 rdepends[p] = []
1531 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1532}
1533
1534
1535python package_package_name_hook() {
1536 """
1537 A package_name_hook function can be used to rewrite the package names by
1538 changing PKG. For an example, see debian.bbclass.
1539 """
1540 pass
1541}
1542
1543EXPORT_FUNCTIONS package_name_hook
1544
1545
1546PKGDESTWORK = "${WORKDIR}/pkgdata"
1547
1548PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
1549
1550python emit_pkgdata() {
1551 from glob import glob
1552 import json
1553
1554 def process_postinst_on_target(pkg, mlprefix):
1555 pkgval = d.getVar('PKG_%s' % pkg)
1556 if pkgval is None:
1557 pkgval = pkg
1558
1559 defer_fragment = """
1560if [ -n "$D" ]; then
1561 $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
1562 exit 0
1563fi
1564""" % (pkgval, mlprefix)
1565
1566 postinst = d.getVar('pkg_postinst_%s' % pkg)
1567 postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
1568
1569 if postinst_ontarget:
1570 bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
1571 if not postinst:
1572 postinst = '#!/bin/sh\n'
1573 postinst += defer_fragment
1574 postinst += postinst_ontarget
1575 d.setVar('pkg_postinst_%s' % pkg, postinst)
1576
1577 def add_set_e_to_scriptlets(pkg):
1578 for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
1579 scriptlet = d.getVar('%s_%s' % (scriptlet_name, pkg))
1580 if scriptlet:
1581 scriptlet_split = scriptlet.split('\n')
1582 if scriptlet_split[0].startswith("#!"):
1583 scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
1584 else:
1585 scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
1586 d.setVar('%s_%s' % (scriptlet_name, pkg), scriptlet)
1587
1588 def write_if_exists(f, pkg, var):
1589 def encode(str):
1590 import codecs
1591 c = codecs.getencoder("unicode_escape")
1592 return c(str)[0].decode("latin1")
1593
1594 val = d.getVar('%s_%s' % (var, pkg))
1595 if val:
1596 f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
1597 return val
1598 val = d.getVar('%s' % (var))
1599 if val:
1600 f.write('%s: %s\n' % (var, encode(val)))
1601 return val
1602
1603 def write_extra_pkgs(variants, pn, packages, pkgdatadir):
1604 for variant in variants:
1605 with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
1606 fd.write("PACKAGES: %s\n" % ' '.join(
1607 map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
1608
1609 def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
1610 for variant in variants:
1611 for pkg in packages.split():
1612 ml_pkg = "%s-%s" % (variant, pkg)
1613 subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
1614 with open(subdata_file, 'w') as fd:
1615 fd.write("PKG_%s: %s" % (ml_pkg, pkg))
1616
1617 packages = d.getVar('PACKAGES')
1618 pkgdest = d.getVar('PKGDEST')
1619 pkgdatadir = d.getVar('PKGDESTWORK')
1620
1621 data_file = pkgdatadir + d.expand("/${PN}")
1622 with open(data_file, 'w') as fd:
1623 fd.write("PACKAGES: %s\n" % packages)
1624
1625 pn = d.getVar('PN')
1626 global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
1627 variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
1628
1629 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1630 write_extra_pkgs(variants, pn, packages, pkgdatadir)
1631
1632 if bb.data.inherits_class('allarch', d) and not variants \
1633 and not bb.data.inherits_class('packagegroup', d):
1634 write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
1635
1636 workdir = d.getVar('WORKDIR')
1637
1638 for pkg in packages.split():
1639 pkgval = d.getVar('PKG_%s' % pkg)
1640 if pkgval is None:
1641 pkgval = pkg
1642 d.setVar('PKG_%s' % pkg, pkg)
1643
1644 pkgdestpkg = os.path.join(pkgdest, pkg)
1645 files = {}
1646 total_size = 0
1647 seen = set()
1648 for f in pkgfiles[pkg]:
1649 relpth = os.path.relpath(f, pkgdestpkg)
1650 fstat = os.lstat(f)
1651 files[os.sep + relpth] = fstat.st_size
1652 if fstat.st_ino not in seen:
1653 seen.add(fstat.st_ino)
1654 total_size += fstat.st_size
1655 d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
1656
1657 process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
1658 add_set_e_to_scriptlets(pkg)
1659
1660 subdata_file = pkgdatadir + "/runtime/%s" % pkg
1661 with open(subdata_file, 'w') as sf:
1662 for var in (d.getVar('PKGDATA_VARS') or "").split():
1663 val = write_if_exists(sf, pkg, var)
1664
1665 write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
1666 for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
1667 write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
1668
1669 write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
1670 for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
1671 write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
1672
1673 sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
1674
1675 # Symlinks needed for rprovides lookup
1676 rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
1677 if rprov:
1678 for p in bb.utils.explode_deps(rprov):
1679 subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
1680 bb.utils.mkdirhier(os.path.dirname(subdata_sym))
1681 oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
1682
1683 allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg)
1684 if not allow_empty:
1685 allow_empty = d.getVar('ALLOW_EMPTY')
1686 root = "%s/%s" % (pkgdest, pkg)
1687 os.chdir(root)
1688 g = glob('*')
1689 if g or allow_empty == "1":
1690 # Symlinks needed for reverse lookups (from the final package name)
1691 subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
1692 oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
1693
1694 packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
1695 open(packagedfile, 'w').close()
1696
1697 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1698 write_extra_runtime_pkgs(variants, packages, pkgdatadir)
1699
1700 if bb.data.inherits_class('allarch', d) and not variants \
1701 and not bb.data.inherits_class('packagegroup', d):
1702 write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
1703
1704}
1705emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
1706
1707ldconfig_postinst_fragment() {
1708if [ x"$D" = "x" ]; then
1709 if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
1710fi
1711}
1712
1713RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
1714
1715# Collect per-file run-time dependency metadata
1716# Output:
1717# FILERPROVIDESFLIST_pkg - list of all files w/ provides
1718# FILERPROVIDES_filepath_pkg - per-file provides
1719#
1720# FILERDEPENDSFLIST_pkg - list of all files w/ deps
1721# FILERDEPENDS_filepath_pkg - per-file deps
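#
# For example, a package 'foo' shipping /usr/bin/foo would typically end up with
# FILERDEPENDSFLIST_foo listing '/usr/bin/foo' and a corresponding
# FILERDEPENDS_/usr/bin/foo_foo holding whatever rpmdeps reports for that file
# (the exact file keys and dependency strings depend on rpmdeps output; 'foo' is illustrative).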
1722
1723python package_do_filedeps() {
1724 if d.getVar('SKIP_FILEDEPS') == '1':
1725 return
1726
1727 pkgdest = d.getVar('PKGDEST')
1728 packages = d.getVar('PACKAGES')
1729 rpmdeps = d.getVar('RPMDEPS')
1730
1731 def chunks(files, n):
1732 return [files[i:i+n] for i in range(0, len(files), n)]
1733
1734 pkglist = []
1735 for pkg in packages.split():
1736 if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
1737 continue
1738 if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
1739 continue
1740 for files in chunks(pkgfiles[pkg], 100):
1741 pkglist.append((pkg, files, rpmdeps, pkgdest))
1742
1743 processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
1744
1745 provides_files = {}
1746 requires_files = {}
1747
1748 for result in processed:
1749 (pkg, provides, requires) = result
1750
1751 if pkg not in provides_files:
1752 provides_files[pkg] = []
1753 if pkg not in requires_files:
1754 requires_files[pkg] = []
1755
1756 for file in sorted(provides):
1757 provides_files[pkg].append(file)
1758 key = "FILERPROVIDES_" + file + "_" + pkg
1759 d.appendVar(key, " " + " ".join(provides[file]))
1760
1761 for file in sorted(requires):
1762 requires_files[pkg].append(file)
1763 key = "FILERDEPENDS_" + file + "_" + pkg
1764 d.appendVar(key, " " + " ".join(requires[file]))
1765
1766 for pkg in requires_files:
1767 d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
1768 for pkg in provides_files:
1769 d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
1770}
1771
1772SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
1773SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
1774
1775python package_do_shlibs() {
1776 import itertools
1777 import re, pipes
1778 import subprocess
1779
1780 exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
1781 if exclude_shlibs:
1782 bb.note("not generating shlibs")
1783 return
1784
1785 lib_re = re.compile(r"^.*\.so")
1786 libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
1787
1788 packages = d.getVar('PACKAGES')
1789
1790 shlib_pkgs = []
1791 exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
1792 if exclusion_list:
1793 for pkg in packages.split():
1794 if pkg not in exclusion_list.split():
1795 shlib_pkgs.append(pkg)
1796 else:
1797 bb.note("not generating shlibs for %s" % pkg)
1798 else:
1799 shlib_pkgs = packages.split()
1800
1801 hostos = d.getVar('HOST_OS')
1802
1803 workdir = d.getVar('WORKDIR')
1804
1805 ver = d.getVar('PKGV')
1806 if not ver:
1807 msg = "PKGV not defined"
1808 package_qa_handle_error("pkgv-undefined", msg, d)
1809 return
1810
1811 pkgdest = d.getVar('PKGDEST')
1812
1813 shlibswork_dir = d.getVar('SHLIBSWORKDIR')
1814
1815 def linux_so(file, pkg, pkgver, d):
1816 needs_ldconfig = False
1817 needed = set()
1818 sonames = set()
1819 renames = []
1820 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1821 cmd = d.getVar('OBJDUMP') + " -p " + pipes.quote(file) + " 2>/dev/null"
1822 fd = os.popen(cmd)
1823 lines = fd.readlines()
1824 fd.close()
1825 rpath = tuple()
1826 for l in lines:
1827 m = re.match(r"\s+RPATH\s+([^\s]*)", l)
1828 if m:
1829 rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
1830 rpath = tuple(map(os.path.normpath, rpaths))
1831 for l in lines:
1832 m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
1833 if m:
1834 dep = m.group(1)
1835 if dep not in needed:
1836 needed.add((dep, file, rpath))
1837 m = re.match(r"\s+SONAME\s+([^\s]*)", l)
1838 if m:
1839 this_soname = m.group(1)
1840 prov = (this_soname, ldir, pkgver)
1841 if not prov in sonames:
1842 # if library is private (only used by package) then do not build shlib for it
1843 import fnmatch
1844 if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
1845 sonames.add(prov)
1846 if libdir_re.match(os.path.dirname(file)):
1847 needs_ldconfig = True
1848 if snap_symlinks and (os.path.basename(file) != this_soname):
1849 renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
1850 return (needs_ldconfig, needed, sonames, renames)
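    # Note: linux_so() above scans 'objdump -p' output; the dynamic-section lines it
    # matches look like '  NEEDED  libc.so.6', '  SONAME  libfoo.so.1' and
    # '  RPATH  $ORIGIN/../lib' (library names here are only illustrative).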
1851
1852 def darwin_so(file, needed, sonames, renames, pkgver):
1853 if not os.path.exists(file):
1854 return
1855 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1856
1857 def get_combinations(base):
1858 #
1859 # Given a base library name, find all combinations of this split by "." and "-"
1860 #
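            # For example, a base name of 'libfoo-bar.1.2' yields
            # 'libfoo-bar', 'libfoo-bar.1', 'libfoo-bar.1.2' from the '.' split and
            # 'libfoo', 'libfoo-bar.1.2' from the '-' split.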
1861 combos = []
1862 options = base.split(".")
1863 for i in range(1, len(options) + 1):
1864 combos.append(".".join(options[0:i]))
1865 options = base.split("-")
1866 for i in range(1, len(options) + 1):
1867 combos.append("-".join(options[0:i]))
1868 return combos
1869
1870 if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
1871 # Drop suffix
1872 name = os.path.basename(file).rsplit(".",1)[0]
1873 # Find all combinations
1874 combos = get_combinations(name)
1875 for combo in combos:
1876 if not combo in sonames:
1877 prov = (combo, ldir, pkgver)
1878 sonames.add(prov)
1879 if file.endswith('.dylib') or file.endswith('.so'):
1880 rpath = []
1881 p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1882 out, err = p.communicate()
1883 # If returned successfully, process stdout for results
1884 if p.returncode == 0:
1885 for l in out.split("\n"):
1886 l = l.strip()
1887 if l.startswith('path '):
1888 rpath.append(l.split()[1])
1889
1890 p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1891 out, err = p.communicate()
1892 # If returned successfully, process stdout for results
1893 if p.returncode == 0:
1894 for l in out.split("\n"):
1895 l = l.strip()
1896 if not l or l.endswith(":"):
1897 continue
1898 if "is not an object file" in l:
1899 continue
1900 name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
1901 if name and name not in needed[pkg]:
1902 needed[pkg].add((name, file, tuple()))
1903
1904 def mingw_dll(file, needed, sonames, renames, pkgver):
1905 if not os.path.exists(file):
1906 return
1907
1908 if file.endswith(".dll"):
1909 # assume all dlls are shared objects provided by the package
1910 sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
1911
1912 if (file.endswith(".dll") or file.endswith(".exe")):
1913 # use objdump to search for "DLL Name: .*\.dll"
1914 p = subprocess.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1915 out, err = p.communicate()
1916 # process the output, grabbing all .dll names
1917 if p.returncode == 0:
1918 for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
1919 dllname = m.group(1)
1920 if dllname:
1921 needed[pkg].add((dllname, file, tuple()))
1922
1923 if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
1924 snap_symlinks = True
1925 else:
1926 snap_symlinks = False
1927
1928 needed = {}
1929
1930 shlib_provider = oe.package.read_shlib_providers(d)
1931
1932 for pkg in shlib_pkgs:
1933 private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
1934 private_libs = private_libs.split()
1935 needs_ldconfig = False
1936 bb.debug(2, "calculating shlib provides for %s" % pkg)
1937
1938 pkgver = d.getVar('PKGV_' + pkg)
1939 if not pkgver:
1940 pkgver = d.getVar('PV_' + pkg)
1941 if not pkgver:
1942 pkgver = ver
1943
1944 needed[pkg] = set()
1945 sonames = set()
1946 renames = []
1947 linuxlist = []
1948 for file in pkgfiles[pkg]:
1949 soname = None
1950 if cpath.islink(file):
1951 continue
1952 if hostos == "darwin" or hostos == "darwin8":
1953 darwin_so(file, needed, sonames, renames, pkgver)
1954 elif hostos.startswith("mingw"):
1955 mingw_dll(file, needed, sonames, renames, pkgver)
1956 elif os.access(file, os.X_OK) or lib_re.match(file):
1957 linuxlist.append(file)
1958
1959 if linuxlist:
1960 results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
1961 for r in results:
1962 ldconfig = r[0]
1963 needed[pkg] |= r[1]
1964 sonames |= r[2]
1965 renames.extend(r[3])
1966 needs_ldconfig = needs_ldconfig or ldconfig
1967
1968 for (old, new) in renames:
1969 bb.note("Renaming %s to %s" % (old, new))
1970 os.rename(old, new)
1971 pkgfiles[pkg].remove(old)
1972
1973 shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
1974 if len(sonames):
1975 with open(shlibs_file, 'w') as fd:
1976 for s in sorted(sonames):
1977 if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
1978 (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
1979 if old_pkg != pkg:
1980 bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
1981 bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
1982 fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
1983 if s[0] not in shlib_provider:
1984 shlib_provider[s[0]] = {}
1985 shlib_provider[s[0]][s[1]] = (pkg, pkgver)
1986 if needs_ldconfig:
1987 bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
1988 postinst = d.getVar('pkg_postinst_%s' % pkg)
1989 if not postinst:
1990 postinst = '#!/bin/sh\n'
1991 postinst += d.getVar('ldconfig_postinst_fragment')
1992 d.setVar('pkg_postinst_%s' % pkg, postinst)
1993 bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
1994
1995 assumed_libs = d.getVar('ASSUME_SHLIBS')
1996 if assumed_libs:
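        # Each entry has the form <library>:<providing-package>[_<version>], e.g.
        # 'libfoo.so.1:libfoo_1.2' (names here are purely illustrative).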
1997 libdir = d.getVar("libdir")
1998 for e in assumed_libs.split():
1999 l, dep_pkg = e.split(":")
2000 lib_ver = None
2001 dep_pkg = dep_pkg.rsplit("_", 1)
2002 if len(dep_pkg) == 2:
2003 lib_ver = dep_pkg[1]
2004 dep_pkg = dep_pkg[0]
2005 if l not in shlib_provider:
2006 shlib_provider[l] = {}
2007 shlib_provider[l][libdir] = (dep_pkg, lib_ver)
2008
2009 libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
2010
2011 for pkg in shlib_pkgs:
2012 bb.debug(2, "calculating shlib requirements for %s" % pkg)
2013
2014 private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
2015 private_libs = private_libs.split()
2016
2017 deps = list()
2018 for n in needed[pkg]:
2019            # If n is in the private libraries, don't try to find a provider for it:
2020            # this could cause a problem if some abc.bb provided a private
2021            # /opt/abc/lib/libfoo.so.1 and also contained /usr/bin/abc depending on the system library libfoo.so.1,
2022            # but skipping it is still a better alternative than providing our own
2023            # version and then adding a runtime dependency on the same system library.
2024 import fnmatch
2025 if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
2026 bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
2027 continue
2028 if n[0] in shlib_provider.keys():
2029 shlib_provider_map = shlib_provider[n[0]]
2030 matches = set()
2031 for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
2032 if p in shlib_provider_map:
2033 matches.add(p)
2034 if len(matches) > 1:
2035 matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
2036 bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
2037 elif len(matches) == 1:
2038 (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
2039
2040 bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
2041
2042 if dep_pkg == pkg:
2043 continue
2044
2045 if ver_needed:
2046 dep = "%s (>= %s)" % (dep_pkg, ver_needed)
2047 else:
2048 dep = dep_pkg
2049 if not dep in deps:
2050 deps.append(dep)
2051 continue
2052 bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
2053
2054 deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
2055 if os.path.exists(deps_file):
2056 os.remove(deps_file)
2057 if deps:
2058 with open(deps_file, 'w') as fd:
2059 for dep in sorted(deps):
2060 fd.write(dep + '\n')
2061}
2062
2063python package_do_pkgconfig () {
2064 import re
2065
2066 packages = d.getVar('PACKAGES')
2067 workdir = d.getVar('WORKDIR')
2068 pkgdest = d.getVar('PKGDEST')
2069
2070 shlibs_dirs = d.getVar('SHLIBSDIRS').split()
2071 shlibswork_dir = d.getVar('SHLIBSWORKDIR')
2072
2073 pc_re = re.compile(r'(.*)\.pc$')
2074 var_re = re.compile(r'(.*)=(.*)')
2075 field_re = re.compile(r'(.*): (.*)')
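    # A .pc file consists of variable assignments such as 'prefix=/usr' and fields
    # such as 'Requires: glib-2.0 >= 2.40'; the Requires entries matched below are
    # what become inter-package pkg-config dependencies (values here are illustrative).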
2076
2077 pkgconfig_provided = {}
2078 pkgconfig_needed = {}
2079 for pkg in packages.split():
2080 pkgconfig_provided[pkg] = []
2081 pkgconfig_needed[pkg] = []
2082 for file in pkgfiles[pkg]:
2083 m = pc_re.match(file)
2084 if m:
2085 pd = bb.data.init()
2086 name = m.group(1)
2087 pkgconfig_provided[pkg].append(name)
2088 if not os.access(file, os.R_OK):
2089 continue
2090 with open(file, 'r') as f:
2091 lines = f.readlines()
2092 for l in lines:
2093 m = var_re.match(l)
2094 if m:
2095 name = m.group(1)
2096 val = m.group(2)
2097 pd.setVar(name, pd.expand(val))
2098 continue
2099 m = field_re.match(l)
2100 if m:
2101 hdr = m.group(1)
2102 exp = pd.expand(m.group(2))
2103 if hdr == 'Requires':
2104 pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
2105
2106 for pkg in packages.split():
2107 pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
2108 if pkgconfig_provided[pkg] != []:
2109 with open(pkgs_file, 'w') as f:
2110 for p in pkgconfig_provided[pkg]:
2111 f.write('%s\n' % p)
2112
2113 # Go from least to most specific since the last one found wins
2114 for dir in reversed(shlibs_dirs):
2115 if not os.path.exists(dir):
2116 continue
2117 for file in sorted(os.listdir(dir)):
2118 m = re.match(r'^(.*)\.pclist$', file)
2119 if m:
2120 pkg = m.group(1)
2121 with open(os.path.join(dir, file)) as fd:
2122 lines = fd.readlines()
2123 pkgconfig_provided[pkg] = []
2124 for l in lines:
2125 pkgconfig_provided[pkg].append(l.rstrip())
2126
2127 for pkg in packages.split():
2128 deps = []
2129 for n in pkgconfig_needed[pkg]:
2130 found = False
2131 for k in pkgconfig_provided.keys():
2132 if n in pkgconfig_provided[k]:
2133 if k != pkg and not (k in deps):
2134 deps.append(k)
2135 found = True
2136 if found == False:
2137 bb.note("couldn't find pkgconfig module '%s' in any package" % n)
2138 deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
2139 if len(deps):
2140 with open(deps_file, 'w') as fd:
2141 for dep in deps:
2142 fd.write(dep + '\n')
2143}
2144
2145def read_libdep_files(d):
2146 pkglibdeps = {}
2147 packages = d.getVar('PACKAGES').split()
2148 for pkg in packages:
2149 pkglibdeps[pkg] = {}
2150 for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
2151 depsfile = d.expand("${PKGDEST}/" + pkg + extension)
2152 if os.access(depsfile, os.R_OK):
2153 with open(depsfile) as fd:
2154 lines = fd.readlines()
2155 for l in lines:
2156                    l = l.rstrip()
2157 deps = bb.utils.explode_dep_versions2(l)
2158 for dep in deps:
2159 if not dep in pkglibdeps[pkg]:
2160 pkglibdeps[pkg][dep] = deps[dep]
2161 return pkglibdeps
2162
2163python read_shlibdeps () {
2164 pkglibdeps = read_libdep_files(d)
2165
2166 packages = d.getVar('PACKAGES').split()
2167 for pkg in packages:
2168 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
2169 for dep in sorted(pkglibdeps[pkg]):
2170 # Add the dep if it's not already there, or if no comparison is set
2171 if dep not in rdepends:
2172 rdepends[dep] = []
2173 for v in pkglibdeps[pkg][dep]:
2174 if v not in rdepends[dep]:
2175 rdepends[dep].append(v)
2176 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
2177}
2178
2179python package_depchains() {
2180 """
2181 For a given set of prefix and postfix modifiers, make those packages
2182 RRECOMMENDS on the corresponding packages for its RDEPENDS.
2183
2184 Example: If package A depends upon package B, and A's .bb emits an
2185 A-dev package, this would make A-dev Recommends: B-dev.
2186
2187 If only one of a given suffix is specified, it will take the RRECOMMENDS
2188 based on the RDEPENDS of *all* other packages. If more than one of a given
2189    suffix is specified, it will only use the RDEPENDS of the single parent
2190 package.
2191 """
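    # As a rough illustration (the modifier lists come from DEPCHAIN_PRE/DEPCHAIN_POST,
    # typically including suffixes such as '-dev' and '-dbg'): post_getname('glibc', '-dev')
    # returns 'glibc-dev', which is then added to the RRECOMMENDS of the corresponding -dev package.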
2192
2193 packages = d.getVar('PACKAGES')
2194 postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
2195 prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
2196
2197 def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
2198
2199 #bb.note('depends for %s is %s' % (base, depends))
2200 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
2201
2202 for depend in sorted(depends):
2203 if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
2204 #bb.note("Skipping %s" % depend)
2205 continue
2206 if depend.endswith('-dev'):
2207 depend = depend[:-4]
2208 if depend.endswith('-dbg'):
2209 depend = depend[:-4]
2210 pkgname = getname(depend, suffix)
2211 #bb.note("Adding %s for %s" % (pkgname, depend))
2212 if pkgname not in rreclist and pkgname != pkg:
2213 rreclist[pkgname] = []
2214
2215 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
2216 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
2217
2218 def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
2219
2220 #bb.note('rdepends for %s is %s' % (base, rdepends))
2221 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
2222
2223 for depend in sorted(rdepends):
2224 if depend.find('virtual-locale-') != -1:
2225 #bb.note("Skipping %s" % depend)
2226 continue
2227 if depend.endswith('-dev'):
2228 depend = depend[:-4]
2229 if depend.endswith('-dbg'):
2230 depend = depend[:-4]
2231 pkgname = getname(depend, suffix)
2232 #bb.note("Adding %s for %s" % (pkgname, depend))
2233 if pkgname not in rreclist and pkgname != pkg:
2234 rreclist[pkgname] = []
2235
2236 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
2237 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
2238
2239 def add_dep(list, dep):
2240 if dep not in list:
2241 list.append(dep)
2242
2243 depends = []
2244 for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
2245 add_dep(depends, dep)
2246
2247 rdepends = []
2248 for pkg in packages.split():
2249 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg) or ""):
2250 add_dep(rdepends, dep)
2251
2252 #bb.note('rdepends is %s' % rdepends)
2253
2254 def post_getname(name, suffix):
2255 return '%s%s' % (name, suffix)
2256 def pre_getname(name, suffix):
2257 return '%s%s' % (suffix, name)
2258
2259 pkgs = {}
2260 for pkg in packages.split():
2261 for postfix in postfixes:
2262 if pkg.endswith(postfix):
2263 if not postfix in pkgs:
2264 pkgs[postfix] = {}
2265 pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
2266
2267 for prefix in prefixes:
2268 if pkg.startswith(prefix):
2269 if not prefix in pkgs:
2270 pkgs[prefix] = {}
2271 pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
2272
2273 if "-dbg" in pkgs:
2274 pkglibdeps = read_libdep_files(d)
2275 pkglibdeplist = []
2276 for pkg in pkglibdeps:
2277 for k in pkglibdeps[pkg]:
2278 add_dep(pkglibdeplist, k)
2279 dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
2280
2281 for suffix in pkgs:
2282 for pkg in pkgs[suffix]:
2283 if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
2284 continue
2285 (base, func) = pkgs[suffix][pkg]
2286 if suffix == "-dev":
2287 pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
2288 elif suffix == "-dbg":
2289 if not dbgdefaultdeps:
2290 pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
2291 continue
2292 if len(pkgs[suffix]) == 1:
2293 pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
2294 else:
2295 rdeps = []
2296 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base) or ""):
2297 add_dep(rdeps, dep)
2298 pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
2299}
2300
2301# Since bitbake can't determine which variables are accessed during package
2302# iteration, we need to list them here:
2303PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
2304
2305def gen_packagevar(d, pkgvars="PACKAGEVARS"):
2306 ret = []
2307 pkgs = (d.getVar("PACKAGES") or "").split()
2308 vars = (d.getVar(pkgvars) or "").split()
2309 for v in vars:
2310 ret.append(v)
2311 for p in pkgs:
2312 for v in vars:
2313 ret.append(v + "_" + p)
2314
2315 # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
2316 # affected recipes.
2317 ret.append('LICENSE_EXCLUSION-%s' % p)
2318 return " ".join(ret)
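# As an illustration, with PACKAGES = "foo foo-dev" the result contains the base
# variables (FILES, RDEPENDS, ...) followed by their per-package forms (FILES_foo,
# RDEPENDS_foo, ..., LICENSE_EXCLUSION-foo, FILES_foo-dev, ..., LICENSE_EXCLUSION-foo-dev).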
2319
2320PACKAGE_PREPROCESS_FUNCS ?= ""
2321# Functions for setting up PKGD
2322PACKAGEBUILDPKGD ?= " \
2323 package_prepare_pkgdata \
2324 perform_packagecopy \
2325 ${PACKAGE_PREPROCESS_FUNCS} \
2326 split_and_strip_files \
2327 fixup_perms \
2328 "
2329# Functions which split PKGD up into separate packages
2330PACKAGESPLITFUNCS ?= " \
2331 package_do_split_locales \
2332 populate_packages"
2333# Functions which process metadata based on split packages
2334PACKAGEFUNCS += " \
2335 package_fixsymlinks \
2336 package_name_hook \
2337 package_do_filedeps \
2338 package_do_shlibs \
2339 package_do_pkgconfig \
2340 read_shlibdeps \
2341 package_depchains \
2342 emit_pkgdata"
2343
2344python do_package () {
2345 # Change the following version to cause sstate to invalidate the package
2346 # cache. This is useful if an item this class depends on changes in a
2347 # way that the output of this class changes. rpmdeps is a good example
2348 # as any change to rpmdeps requires this to be rerun.
2349 # PACKAGE_BBCLASS_VERSION = "4"
2350
2351 # Init cachedpath
2352 global cpath
2353 cpath = oe.cachedpath.CachedPath()
2354
2355 ###########################################################################
2356 # Sanity test the setup
2357 ###########################################################################
2358
2359 packages = (d.getVar('PACKAGES') or "").split()
2360 if len(packages) < 1:
2361 bb.debug(1, "No packages to build, skipping do_package")
2362 return
2363
2364 workdir = d.getVar('WORKDIR')
2365 outdir = d.getVar('DEPLOY_DIR')
2366 dest = d.getVar('D')
2367 dvar = d.getVar('PKGD')
2368 pn = d.getVar('PN')
2369
2370 if not workdir or not outdir or not dest or not dvar or not pn:
2371 msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
2372 package_qa_handle_error("var-undefined", msg, d)
2373 return
2374
2375 bb.build.exec_func("package_convert_pr_autoinc", d)
2376
2377 ###########################################################################
2378 # Optimisations
2379 ###########################################################################
2380
2381 # Continually expanding complex expressions is inefficient, particularly
2382 # when we write to the datastore and invalidate the expansion cache. This
2383 # code pre-expands some frequently used variables
2384
2385 def expandVar(x, d):
2386 d.setVar(x, d.getVar(x))
2387
2388 for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
2389 expandVar(x, d)
2390
2391 ###########################################################################
2392 # Setup PKGD (from D)
2393 ###########################################################################
2394
2395 for f in (d.getVar('PACKAGEBUILDPKGD') or '').split():
2396 bb.build.exec_func(f, d)
2397
2398 ###########################################################################
2399 # Split up PKGD into PKGDEST
2400 ###########################################################################
2401
2402 cpath = oe.cachedpath.CachedPath()
2403
2404 for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
2405 bb.build.exec_func(f, d)
2406
2407 ###########################################################################
2408 # Process PKGDEST
2409 ###########################################################################
2410
2411 # Build global list of files in each split package
2412 global pkgfiles
2413 pkgfiles = {}
2414 packages = d.getVar('PACKAGES').split()
2415 pkgdest = d.getVar('PKGDEST')
2416 for pkg in packages:
2417 pkgfiles[pkg] = []
2418 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
2419 for file in files:
2420 pkgfiles[pkg].append(walkroot + os.sep + file)
2421
2422 for f in (d.getVar('PACKAGEFUNCS') or '').split():
2423 bb.build.exec_func(f, d)
2424
2425 qa_sane = d.getVar("QA_SANE")
2426 if not qa_sane:
2427 bb.fatal("Fatal QA errors found, failing task.")
2428}
2429
2430do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
2431do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
2432addtask package after do_install
2433
2434SSTATETASKS += "do_package"
2435do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
2436do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
2437do_package_setscene[dirs] = "${STAGING_DIR}"
2438
2439python do_package_setscene () {
2440 sstate_setscene(d)
2441}
2442addtask do_package_setscene
2443
2444# Copy from PKGDESTWORK to a temporary directory, as PKGDESTWORK can be cleaned at both
2445# do_package_setscene and do_packagedata_setscene, leading to races
2446python do_packagedata () {
2447 bb.build.exec_func("package_get_auto_pr", d)
2448
2449 src = d.expand("${PKGDESTWORK}")
2450 dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
2451 oe.path.copyhardlinktree(src, dest)
2452
2453 bb.build.exec_func("packagedata_translate_pr_autoinc", d)
2454}
2455do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
2456
2457# Translate the EXTENDPRAUTO and AUTOINC to the final values
2458packagedata_translate_pr_autoinc() {
2459 find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
2460 sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
2461 -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
2462}
2463
2464addtask packagedata before do_build after do_package
2465
2466SSTATETASKS += "do_packagedata"
2467do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
2468do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
2469do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
2470
2471python do_packagedata_setscene () {
2472 sstate_setscene(d)
2473}
2474addtask do_packagedata_setscene
2475
2476#
2477# Helper functions for the package writing classes
2478#
2479
2480def mapping_rename_hook(d):
2481 """
2482 Rewrite variables to account for package renaming in things
2483 like debian.bbclass or manual PKG variable name changes
2484 """
2485 pkg = d.getVar("PKG")
2486 runtime_mapping_rename("RDEPENDS", pkg, d)
2487 runtime_mapping_rename("RRECOMMENDS", pkg, d)
2488 runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
deleted file mode 100644
index b3d8ce330e..0000000000
--- a/meta/classes/package_deb.bbclass
+++ /dev/null
@@ -1,324 +0,0 @@
1#
2# Copyright 2006-2008 OpenedHand Ltd.
3#
4
5inherit package
6
7IMAGE_PKGTYPE ?= "deb"
8
9DPKG_BUILDCMD ??= "dpkg-deb"
10
11DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
12DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
13
14PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
15
16APTCONF_TARGET = "${WORKDIR}"
17
18APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
19
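# debian_arch_map() below converts an OE TARGET_ARCH/TUNE_FEATURES pair into the
# Debian architecture name, e.g. 'x86_64' -> 'amd64', 'aarch64' -> 'arm64', and
# 'arm' with 'callconvention-hard' in TUNE_FEATURES -> 'armhf'.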
20def debian_arch_map(arch, tune):
21 tune_features = tune.split()
22 if arch == "allarch":
23 return "all"
24 if arch in ["i586", "i686"]:
25 return "i386"
26 if arch == "x86_64":
27 if "mx32" in tune_features:
28 return "x32"
29 return "amd64"
30 if arch.startswith("mips"):
31 endian = ["el", ""]["bigendian" in tune_features]
32 if "n64" in tune_features:
33 return "mips64" + endian
34 if "n32" in tune_features:
35 return "mipsn32" + endian
36 return "mips" + endian
37 if arch == "powerpc":
38 return arch + ["", "spe"]["spe" in tune_features]
39 if arch == "aarch64":
40 return "arm64"
41 if arch == "arm":
42 return arch + ["el", "hf"]["callconvention-hard" in tune_features]
43 return arch
44
45python do_package_deb () {
46 packages = d.getVar('PACKAGES')
47 if not packages:
48 bb.debug(1, "PACKAGES not defined, nothing to package")
49 return
50
51 tmpdir = d.getVar('TMPDIR')
52 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
53 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
54
55 oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
56}
57do_package_deb[vardeps] += "deb_write_pkg"
58do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
59
60def deb_write_pkg(pkg, d):
61 import re, copy
62 import textwrap
63 import subprocess
64 import collections
65 import codecs
66
67 outdir = d.getVar('PKGWRITEDIRDEB')
68 pkgdest = d.getVar('PKGDEST')
69
70 def cleanupcontrol(root):
71 for p in ['CONTROL', 'DEBIAN']:
72 p = os.path.join(root, p)
73 if os.path.exists(p):
74 bb.utils.prunedir(p)
75
76 localdata = bb.data.createCopy(d)
77 root = "%s/%s" % (pkgdest, pkg)
78
79 lf = bb.utils.lockfile(root + ".lock")
80 try:
81
82 localdata.setVar('ROOT', '')
83 localdata.setVar('ROOT_%s' % pkg, root)
84 pkgname = localdata.getVar('PKG_%s' % pkg)
85 if not pkgname:
86 pkgname = pkg
87 localdata.setVar('PKG', pkgname)
88
89 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
90
91 basedir = os.path.join(os.path.dirname(root))
92
93 pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
94 bb.utils.mkdirhier(pkgoutdir)
95
96 os.chdir(root)
97 cleanupcontrol(root)
98 from glob import glob
99 g = glob('*')
100 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
101 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
102 return
103
104 controldir = os.path.join(root, 'DEBIAN')
105 bb.utils.mkdirhier(controldir)
106 os.chmod(controldir, 0o755)
107
108 ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
109
110 fields = []
111 pe = d.getVar('PKGE')
112 if pe and int(pe) > 0:
113 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
114 else:
115 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
116 fields.append(["Description: %s\n", ['DESCRIPTION']])
117 fields.append(["Section: %s\n", ['SECTION']])
118 fields.append(["Priority: %s\n", ['PRIORITY']])
119 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
120 fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
121 fields.append(["OE: %s\n", ['PN']])
122 fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
123 if d.getVar('HOMEPAGE'):
124 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
125
126 # Package, Version, Maintainer, Description - mandatory
127 # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
128
129
130 def pullData(l, d):
131 l2 = []
132 for i in l:
133 data = d.getVar(i)
134 if data is None:
135 raise KeyError(i)
136 if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
137 data = 'all'
138 elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
139                    # The fields in a deb package's control file don't allow the
140                    # character `_', so change the arch's `_' to `-', e.g. `x86_64'
141                    # becomes `x86-64'
142 data = data.replace('_', '-')
143 l2.append(data)
144 return l2
145
146 ctrlfile.write("Package: %s\n" % pkgname)
147 if d.getVar('PACKAGE_ARCH') == "all":
148 ctrlfile.write("Multi-Arch: foreign\n")
149 # check for required fields
150 for (c, fs) in fields:
151 # Special behavior for description...
152 if 'DESCRIPTION' in fs:
153 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
154 ctrlfile.write('Description: %s\n' % summary)
155 description = localdata.getVar('DESCRIPTION') or "."
156 description = textwrap.dedent(description).strip()
157 if '\\n' in description:
158 # Manually indent
159 for t in description.split('\\n'):
160 ctrlfile.write(' %s\n' % (t.strip() or '.'))
161 else:
162 # Auto indent
163 ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
164
165 else:
166 ctrlfile.write(c % tuple(pullData(fs, localdata)))
167
168 # more fields
169
170 custom_fields_chunk = get_package_additional_metadata("deb", localdata)
171 if custom_fields_chunk:
172 ctrlfile.write(custom_fields_chunk)
173 ctrlfile.write("\n")
174
175 mapping_rename_hook(localdata)
176
177 def debian_cmp_remap(var):
178 # dpkg does not allow for '(', ')' or ':' in a dependency name
179 # Replace any instances of them with '__'
180 #
181             # In Debian, '>' and '<' do not mean what they appear to mean:
182             #   '<' = less than or equal
183             #   '>' = greater than or equal
184             # so adjust these to the '<<' and '>>' equivalents
185 #
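            # For example, 'foo (> 1.0)' becomes 'foo (>> 1.0)', and a dependency name
            # containing '(' or '/' such as 'libfoo/bar' is rewritten to 'libfoo__bar'
            # (names here are illustrative).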
186 for dep in list(var.keys()):
187 if '(' in dep or '/' in dep:
188 newdep = re.sub(r'[(:)/]', '__', dep)
189 if newdep.startswith("__"):
190 newdep = "A" + newdep
191 if newdep != dep:
192 var[newdep] = var[dep]
193 del var[dep]
194 for dep in var:
195 for i, v in enumerate(var[dep]):
196 if (v or "").startswith("< "):
197 var[dep][i] = var[dep][i].replace("< ", "<< ")
198 elif (v or "").startswith("> "):
199 var[dep][i] = var[dep][i].replace("> ", ">> ")
200
201 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
202 debian_cmp_remap(rdepends)
203 for dep in list(rdepends.keys()):
204 if dep == pkg:
205 del rdepends[dep]
206 continue
207 if '*' in dep:
208 del rdepends[dep]
209 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
210 debian_cmp_remap(rrecommends)
211 for dep in list(rrecommends.keys()):
212 if '*' in dep:
213 del rrecommends[dep]
214 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
215 debian_cmp_remap(rsuggests)
216 # Deliberately drop version information here, not wanted/supported by deb
217 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
218 # Remove file paths if any from rprovides, debian does not support custom providers
219 for key in list(rprovides.keys()):
220 if key.startswith('/'):
221 del rprovides[key]
222 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
223 debian_cmp_remap(rprovides)
224 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
225 debian_cmp_remap(rreplaces)
226 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
227 debian_cmp_remap(rconflicts)
228 if rdepends:
229 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
230 if rsuggests:
231 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
232 if rrecommends:
233 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
234 if rprovides:
235 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
236 if rreplaces:
237 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
238 if rconflicts:
239 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
240 ctrlfile.close()
241
242 for script in ["preinst", "postinst", "prerm", "postrm"]:
243 scriptvar = localdata.getVar('pkg_%s' % script)
244 if not scriptvar:
245 continue
246 scriptvar = scriptvar.strip()
247 scriptfile = open(os.path.join(controldir, script), 'w')
248
249 if scriptvar.startswith("#!"):
250 pos = scriptvar.find("\n") + 1
251 scriptfile.write(scriptvar[:pos])
252 else:
253 pos = 0
254 scriptfile.write("#!/bin/sh\n")
255
256 # Prevent the prerm/postrm scripts from being run during an upgrade
257 if script in ('prerm', 'postrm'):
258 scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
259
260 scriptfile.write(scriptvar[pos:])
261 scriptfile.write('\n')
262 scriptfile.close()
263 os.chmod(os.path.join(controldir, script), 0o755)
264
265 conffiles_str = ' '.join(get_conffiles(pkg, d))
266 if conffiles_str:
267 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
268 for f in conffiles_str.split():
269 if os.path.exists(oe.path.join(root, f)):
270 conffiles.write('%s\n' % f)
271 conffiles.close()
272
273 os.chdir(basedir)
274 subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
275 root, pkgoutdir),
276 stderr=subprocess.STDOUT,
277 shell=True)
278
279 finally:
280 cleanupcontrol(root)
281 bb.utils.unlockfile(lf)
282
283# Otherwise allarch packages may change depending on override configuration
284deb_write_pkg[vardepsexclude] = "OVERRIDES"
285
286# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
287DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
288do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"
289
290SSTATETASKS += "do_package_write_deb"
291do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
292do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
293
294python do_package_write_deb_setscene () {
295 tmpdir = d.getVar('TMPDIR')
296
297 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
298 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
299
300 sstate_setscene(d)
301}
302addtask do_package_write_deb_setscene
303
304python () {
305 if d.getVar('PACKAGES') != '':
306 deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
307 d.appendVarFlag('do_package_write_deb', 'depends', deps)
308 d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
309}
310
311python do_package_write_deb () {
312 bb.build.exec_func("read_subpackage_metadata", d)
313 bb.build.exec_func("do_package_deb", d)
314}
315do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
316do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
317do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
318addtask package_write_deb after do_packagedata do_package
319
320
321PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
322PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
323
324do_build[recrdeptask] += "do_package_write_deb"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
deleted file mode 100644
index 600b3ac90c..0000000000
--- a/meta/classes/package_ipk.bbclass
+++ /dev/null
@@ -1,282 +0,0 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "ipk"
4
5IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
6IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
7IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"
8
9PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
10
11# Program to be used to build opkg packages
12OPKGBUILDCMD ??= 'opkg-build -Z xz -a "${XZ_DEFAULTS}"'
13
14OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
15OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
16OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
17
18OPKGLIBDIR ??= "${localstatedir}/lib"
19
20python do_package_ipk () {
21 workdir = d.getVar('WORKDIR')
22 outdir = d.getVar('PKGWRITEDIRIPK')
23 tmpdir = d.getVar('TMPDIR')
24 pkgdest = d.getVar('PKGDEST')
25 if not workdir or not outdir or not tmpdir:
26 bb.error("Variables incorrectly set, unable to package")
27 return
28
29 packages = d.getVar('PACKAGES')
30 if not packages or packages == '':
31 bb.debug(1, "No packages; nothing to do")
32 return
33
34    # We're about to add new packages, so the index needs to be regenerated;
35    # remove the appropriate stamp file.
36 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
37 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
38
39 oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
40}
41do_package_ipk[vardeps] += "ipk_write_pkg"
42do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
43
44def ipk_write_pkg(pkg, d):
45 import re, copy
46 import subprocess
47 import textwrap
48 import collections
49 import glob
50
51 def cleanupcontrol(root):
52 for p in ['CONTROL', 'DEBIAN']:
53 p = os.path.join(root, p)
54 if os.path.exists(p):
55 bb.utils.prunedir(p)
56
57 outdir = d.getVar('PKGWRITEDIRIPK')
58 pkgdest = d.getVar('PKGDEST')
59 recipesource = os.path.basename(d.getVar('FILE'))
60
61 localdata = bb.data.createCopy(d)
62 root = "%s/%s" % (pkgdest, pkg)
63
64 lf = bb.utils.lockfile(root + ".lock")
65 try:
66 localdata.setVar('ROOT', '')
67 localdata.setVar('ROOT_%s' % pkg, root)
68 pkgname = localdata.getVar('PKG_%s' % pkg)
69 if not pkgname:
70 pkgname = pkg
71 localdata.setVar('PKG', pkgname)
72
73 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
74
75 basedir = os.path.join(os.path.dirname(root))
76 arch = localdata.getVar('PACKAGE_ARCH')
77
78 if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
79 # Spread packages across subdirectories so each isn't too crowded
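            # e.g. with this enabled, 'libfoo-dev' would be written under
            # <outdir>/<arch>/libf/libfoo/ (package name purely illustrative).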
80 if pkgname.startswith('lib'):
81 pkg_prefix = 'lib' + pkgname[3]
82 else:
83 pkg_prefix = pkgname[0]
84
85 # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
86 # together. These package suffixes are taken from the definitions of
87 # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
88 if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
89 pkg_subdir = pkgname[:-4]
90 elif pkgname.endswith('-staticdev'):
91 pkg_subdir = pkgname[:-10]
92 elif pkgname.endswith('-locale'):
93 pkg_subdir = pkgname[:-7]
94 elif '-locale-' in pkgname:
95 pkg_subdir = pkgname[:pkgname.find('-locale-')]
96 else:
97 pkg_subdir = pkgname
98
99 pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
100 else:
101 pkgoutdir = "%s/%s" % (outdir, arch)
102
103 bb.utils.mkdirhier(pkgoutdir)
104 os.chdir(root)
105 cleanupcontrol(root)
106 g = glob.glob('*')
107 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
108 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
109 return
110
111 controldir = os.path.join(root, 'CONTROL')
112 bb.utils.mkdirhier(controldir)
113 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
114
115 fields = []
116 pe = d.getVar('PKGE')
117 if pe and int(pe) > 0:
118 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
119 else:
120 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
121 fields.append(["Description: %s\n", ['DESCRIPTION']])
122 fields.append(["Section: %s\n", ['SECTION']])
123 fields.append(["Priority: %s\n", ['PRIORITY']])
124 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
125 fields.append(["License: %s\n", ['LICENSE']])
126 fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
127 fields.append(["OE: %s\n", ['PN']])
128 if d.getVar('HOMEPAGE'):
129 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
130
131 def pullData(l, d):
132 l2 = []
133 for i in l:
134 l2.append(d.getVar(i))
135 return l2
136
137 ctrlfile.write("Package: %s\n" % pkgname)
138 # check for required fields
139 for (c, fs) in fields:
140 for f in fs:
141 if localdata.getVar(f, False) is None:
142 raise KeyError(f)
143 # Special behavior for description...
144 if 'DESCRIPTION' in fs:
145 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
146 ctrlfile.write('Description: %s\n' % summary)
147 description = localdata.getVar('DESCRIPTION') or "."
148 description = textwrap.dedent(description).strip()
149 if '\\n' in description:
150 # Manually indent: multiline description includes a leading space
151 for t in description.split('\\n'):
152 ctrlfile.write(' %s\n' % (t.strip() or ' .'))
153 else:
154 # Auto indent
155 ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
156 else:
157 ctrlfile.write(c % tuple(pullData(fs, localdata)))
158
159 custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
160 if custom_fields_chunk is not None:
161 ctrlfile.write(custom_fields_chunk)
162 ctrlfile.write("\n")
163
164 mapping_rename_hook(localdata)
165
166 def debian_cmp_remap(var):
167            # In Debian, '>' and '<' do not mean what they appear to mean:
168            #   '<' = less than or equal
169            #   '>' = greater than or equal
170            # so adjust these to the '<<' and '>>' equivalents
171 #
172 for dep in var:
173 for i, v in enumerate(var[dep]):
174 if (v or "").startswith("< "):
175 var[dep][i] = var[dep][i].replace("< ", "<< ")
176 elif (v or "").startswith("> "):
177 var[dep][i] = var[dep][i].replace("> ", ">> ")
178
179 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
180 debian_cmp_remap(rdepends)
181 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
182 debian_cmp_remap(rrecommends)
183 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
184 debian_cmp_remap(rsuggests)
185 # Deliberately drop version information here, not wanted/supported by ipk
186 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
187 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
188 debian_cmp_remap(rprovides)
189 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
190 debian_cmp_remap(rreplaces)
191 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
192 debian_cmp_remap(rconflicts)
193
194 if rdepends:
195 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
196 if rsuggests:
197 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
198 if rrecommends:
199 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
200 if rprovides:
201 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
202 if rreplaces:
203 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
204 if rconflicts:
205 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
206 ctrlfile.write("Source: %s\n" % recipesource)
207 ctrlfile.close()
208
209 for script in ["preinst", "postinst", "prerm", "postrm"]:
210 scriptvar = localdata.getVar('pkg_%s' % script)
211 if not scriptvar:
212 continue
213 scriptfile = open(os.path.join(controldir, script), 'w')
214 scriptfile.write(scriptvar)
215 scriptfile.close()
216 os.chmod(os.path.join(controldir, script), 0o755)
217
218 conffiles_str = ' '.join(get_conffiles(pkg, d))
219 if conffiles_str:
220 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
221 for f in conffiles_str.split():
222 if os.path.exists(oe.path.join(root, f)):
223 conffiles.write('%s\n' % f)
224 conffiles.close()
225
226 os.chdir(basedir)
227 subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
228 d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
229 stderr=subprocess.STDOUT,
230 shell=True)
231
232 if d.getVar('IPK_SIGN_PACKAGES') == '1':
233 ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
234 ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
235 sign_ipk(d, ipk_to_sign)
236
237 finally:
238 cleanupcontrol(root)
239 bb.utils.unlockfile(lf)
240
241# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
242IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
243ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"
244
245# Otherwise allarch packages may change depending on override configuration
246ipk_write_pkg[vardepsexclude] = "OVERRIDES"
247
248
249SSTATETASKS += "do_package_write_ipk"
250do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
251do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
252
253python do_package_write_ipk_setscene () {
254 tmpdir = d.getVar('TMPDIR')
255
256 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
257 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
258
259 sstate_setscene(d)
260}
261addtask do_package_write_ipk_setscene
262
263python () {
264 if d.getVar('PACKAGES') != '':
265 deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot xz-native:do_populate_sysroot'
266 d.appendVarFlag('do_package_write_ipk', 'depends', deps)
267 d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
268}
269
270python do_package_write_ipk () {
271 bb.build.exec_func("read_subpackage_metadata", d)
272 bb.build.exec_func("do_package_ipk", d)
273}
274do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
275do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
276do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
277addtask package_write_ipk after do_packagedata do_package
278
279PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
280PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
281
282do_build[recrdeptask] += "do_package_write_ipk"
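For reference, a small standalone sketch of how the Version field written by ipk_write_pkg is assembled from PKGE/PKGV/PKGR (the values below are made up):

def ipk_version(pkge, pkgv, pkgr):
    # Mirrors the control-file logic above: the epoch is included only when
    # PKGE is set and greater than zero.
    if pkge and int(pkge) > 0:
        return "%s:%s-%s" % (pkge, pkgv, pkgr)
    return "%s-%s" % (pkgv, pkgr)

print(ipk_version("", "1.2.3", "r0"))   # 1.2.3-r0
print(ipk_version("2", "1.2.3", "r0"))  # 2:1.2.3-r0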
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
deleted file mode 100644
index 18b7ed62e0..0000000000
--- a/meta/classes/package_pkgdata.bbclass
+++ /dev/null
@@ -1,167 +0,0 @@
1WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
2
3def package_populate_pkgdata_dir(pkgdatadir, d):
4 import glob
5
6 postinsts = []
7 seendirs = set()
8 stagingdir = d.getVar("PKGDATA_DIR")
9 pkgarchs = ['${MACHINE_ARCH}']
10 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
11 pkgarchs.append('allarch')
12
13 bb.utils.mkdirhier(pkgdatadir)
14 for pkgarch in pkgarchs:
15 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
16 with open(manifest, "r") as f:
17 for l in f:
18 l = l.strip()
19 dest = l.replace(stagingdir, "")
20 if l.endswith("/"):
21 staging_copydir(l, pkgdatadir, dest, seendirs)
22 continue
23 try:
24 staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
25 except FileExistsError:
26 continue
27
28python package_prepare_pkgdata() {
29 import copy
30 import glob
31
32 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
33 mytaskname = d.getVar("BB_RUNTASK")
34 if mytaskname.endswith("_setscene"):
35 mytaskname = mytaskname.replace("_setscene", "")
36 workdir = d.getVar("WORKDIR")
37 pn = d.getVar("PN")
38 stagingdir = d.getVar("PKGDATA_DIR")
39 pkgdatadir = d.getVar("WORKDIR_PKGDATA")
40
41 # Detect bitbake -b usage
42 nodeps = d.getVar("BB_LIMITEDDEPS") or False
43 if nodeps:
44 staging_package_populate_pkgdata_dir(pkgdatadir, d)
45 return
46
47 start = None
48 configuredeps = []
49 for dep in taskdepdata:
50 data = taskdepdata[dep]
51 if data[1] == mytaskname and data[0] == pn:
52 start = dep
53 break
54 if start is None:
55 bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
56
57 # We need to figure out which sysroot files we need to expose to this task.
58 # This needs to match what would get restored from sstate, which is controlled
59 # ultimately by calls from bitbake to setscene_depvalid().
60 # That function expects a setscene dependency tree. We build a dependency tree
61 # condensed to inter-sstate task dependencies, similar to that used by setscene
62 # tasks. We can then call into setscene_depvalid() and decide
63 # which dependencies we can "see" and should expose in the recipe specific sysroot.
64 setscenedeps = copy.deepcopy(taskdepdata)
65
66 start = set([start])
67
68 sstatetasks = d.getVar("SSTATETASKS").split()
69 # Add recipe specific tasks referenced by setscene_depvalid()
70 sstatetasks.append("do_stash_locale")
71
72 # If start is an sstate task (like do_package) we need to add in its direct dependencies
73 # else the code below won't recurse into them.
74 for dep in set(start):
75 for dep2 in setscenedeps[dep][3]:
76 start.add(dep2)
77 start.remove(dep)
78
79 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
80 for dep in taskdepdata:
81 data = setscenedeps[dep]
82 if data[1] not in sstatetasks:
83 for dep2 in setscenedeps:
84 data2 = setscenedeps[dep2]
85 if dep in data2[3]:
86 data2[3].update(setscenedeps[dep][3])
87 data2[3].remove(dep)
88 if dep in start:
89 start.update(setscenedeps[dep][3])
90 start.remove(dep)
91 del setscenedeps[dep]
92
93 # Remove circular references
94 for dep in setscenedeps:
95 if dep in setscenedeps[dep][3]:
96 setscenedeps[dep][3].remove(dep)
97
98 # Direct dependencies should be present and can be depended upon
99 for dep in set(start):
100 if setscenedeps[dep][1] == "do_packagedata":
101 if dep not in configuredeps:
102 configuredeps.append(dep)
103
104 msgbuf = []
105 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
106 # for ones that would be restored from sstate.
107 done = list(start)
108 next = list(start)
109 while next:
110 new = []
111 for dep in next:
112 data = setscenedeps[dep]
113 for datadep in data[3]:
114 if datadep in done:
115 continue
116 taskdeps = {}
117 taskdeps[dep] = setscenedeps[dep][:2]
118 taskdeps[datadep] = setscenedeps[datadep][:2]
119 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
120 done.append(datadep)
121 new.append(datadep)
122 if retval:
123 msgbuf.append("Skipping setscene dependency %s" % datadep)
124 continue
125 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
126 configuredeps.append(datadep)
127 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
128 else:
129 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
130 next = new
131
132 # This logging is too verbose for day to day use sadly
133 #bb.debug(2, "\n".join(msgbuf))
134
135 seendirs = set()
136 postinsts = []
137 multilibs = {}
138 manifests = {}
139
140 msg_adding = []
141
142 for dep in configuredeps:
143 c = setscenedeps[dep][0]
144 msg_adding.append(c)
145
146 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
147 destsysroot = pkgdatadir
148
149 if manifest:
150 targetdir = destsysroot
151 with open(manifest, "r") as f:
152 manifests[dep] = manifest
153 for l in f:
154 l = l.strip()
155 dest = targetdir + l.replace(stagingdir, "")
156 if l.endswith("/"):
157 staging_copydir(l, targetdir, dest, seendirs)
158 continue
159 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
160
161 bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
162
163}
164package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
165package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
166
167
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
deleted file mode 100644
index 84a9a6dd12..0000000000
--- a/meta/classes/package_rpm.bbclass
+++ /dev/null
@@ -1,756 +0,0 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "rpm"
4
5RPM="rpm"
6RPMBUILD="rpmbuild"
7
8PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
9
10 # Maintaining the per-file dependencies has significant overhead when writing the
11# packages. When set, this value merges them for efficiency.
12MERGEPERFILEDEPS = "1"
13
14# Filter dependencies based on a provided function.
15def filter_deps(var, f):
16 import collections
17
18 depends_dict = bb.utils.explode_dep_versions2(var)
19 newdeps_dict = collections.OrderedDict()
20 for dep in depends_dict:
21 if f(dep):
22 newdeps_dict[dep] = depends_dict[dep]
23 return bb.utils.join_deps(newdeps_dict, commasep=False)
24
25# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
26# dependencies for nativesdk packages.
27def filter_nativesdk_deps(srcname, var):
28 if var and srcname.startswith("nativesdk-"):
29 var = filter_deps(var, lambda dep: not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl('))
30 return var
31
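A standalone sketch of the filtering performed by filter_deps/filter_nativesdk_deps, using a plain dict in place of bb.utils.explode_dep_versions2/join_deps (only available inside BitBake); the dependency names are made up:

def filter_deps_example(depends_dict, f):
    # Keep only the dependencies for which the predicate returns True.
    return {dep: vers for dep, vers in depends_dict.items() if f(dep)}

deps = {"/bin/sh": [], "perl": [], "perl(strict)": [], "libc6": [">= 2.31"]}
# Same predicate as filter_nativesdk_deps: drop absolute paths and perl deps.
kept = filter_deps_example(deps, lambda dep: not dep.startswith('/')
                                 and dep != 'perl'
                                 and not dep.startswith('perl('))
print(kept)  # {'libc6': ['>= 2.31']}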
32# Construct per file dependencies file
33def write_rpm_perfiledata(srcname, d):
34 workdir = d.getVar('WORKDIR')
35 packages = d.getVar('PACKAGES')
36 pkgd = d.getVar('PKGD')
37
38 def dump_filerdeps(varname, outfile, d):
39 outfile.write("#!/usr/bin/env python3\n\n")
40 outfile.write("# Dependency table\n")
41 outfile.write('deps = {\n')
42 for pkg in packages.split():
43 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
44 dependsflist = (d.getVar(dependsflist_key) or "")
45 for dfile in dependsflist.split():
46 key = "FILE" + varname + "_" + dfile + "_" + pkg
47 deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
48 depends_dict = bb.utils.explode_dep_versions(deps)
49 file = dfile.replace("@underscore@", "_")
50 file = file.replace("@closebrace@", "]")
51 file = file.replace("@openbrace@", "[")
52 file = file.replace("@tab@", "\t")
53 file = file.replace("@space@", " ")
54 file = file.replace("@at@", "@")
55 outfile.write('"' + pkgd + file + '" : "')
56 for dep in depends_dict:
57 ver = depends_dict[dep]
58 if dep and ver:
59 ver = ver.replace("(","")
60 ver = ver.replace(")","")
61 outfile.write(dep + " " + ver + " ")
62 else:
63 outfile.write(dep + " ")
64 outfile.write('",\n')
65 outfile.write('}\n\n')
66 outfile.write("import sys\n")
67 outfile.write("while 1:\n")
68 outfile.write("\tline = sys.stdin.readline().strip()\n")
69 outfile.write("\tif not line:\n")
70 outfile.write("\t\tsys.exit(0)\n")
71 outfile.write("\tif line in deps:\n")
72 outfile.write("\t\tprint(deps[line] + '\\n')\n")
73
74 # OE-core dependencies a.k.a. RPM requires
75 outdepends = workdir + "/" + srcname + ".requires"
76
77 dependsfile = open(outdepends, 'w')
78
79 dump_filerdeps('RDEPENDS', dependsfile, d)
80
81 dependsfile.close()
82 os.chmod(outdepends, 0o755)
83
84 # OE-core / RPM Provides
85 outprovides = workdir + "/" + srcname + ".provides"
86
87 providesfile = open(outprovides, 'w')
88
89 dump_filerdeps('RPROVIDES', providesfile, d)
90
91 providesfile.close()
92 os.chmod(outprovides, 0o755)
93
94 return (outdepends, outprovides)
95
96
97python write_specfile () {
98 import oe.packagedata
99
100 # append information for logs and patches to %prep
101 def add_prep(d,spec_files_bottom):
102 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
103 spec_files_bottom.append('%%prep -n %s' % d.getVar('PN') )
104 spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
105 spec_files_bottom.append('')
106
107 # append the name of tarball to key word 'SOURCE' in xxx.spec.
108 def tail_source(d):
109 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
110 ar_outdir = d.getVar('ARCHIVER_OUTDIR')
111 if not os.path.exists(ar_outdir):
112 return
113 source_list = os.listdir(ar_outdir)
114 source_number = 0
115 for source in source_list:
116 # do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
117 # exist in ARCHIVER_OUTDIR so skip if present.
118 if source.endswith(".src.rpm"):
119 continue
120 # rpmbuild doesn't need root permission, but it needs to know
121 # the file's user and group names; the only user and group
122 # available when working in fakeroot is "root".
123 f = os.path.join(ar_outdir, source)
124 os.chown(f, 0, 0)
125 spec_preamble_top.append('Source%s: %s' % (source_number, source))
126 source_number += 1
127
128 # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
129 # This format is similar to OE's; however, there are restrictions on the
130 # characters that can be in a field. In the Version field, "-"
131 # characters are not allowed. "-" is allowed in the Release field.
132 #
133 # We translate the "-" in the version to a "+", by loading the PKGV
134 # from the dependent recipe, replacing the - with a +, and then using
135 # that value to do a replace inside of this recipe's dependencies.
136 # This preserves the "-" separator between the version and release, as
137 # well as any "-" characters inside of the release field.
138 #
139 # All of this has to happen BEFORE the mapping_rename_hook as
140 # after renaming we cannot look up the dependencies in the packagedata
141 # store.
142 def translate_vers(varname, d):
143 depends = d.getVar(varname)
144 if depends:
145 depends_dict = bb.utils.explode_dep_versions2(depends)
146 newdeps_dict = {}
147 for dep in depends_dict:
148 verlist = []
149 for ver in depends_dict[dep]:
150 if '-' in ver:
151 subd = oe.packagedata.read_subpkgdata_dict(dep, d)
152 if 'PKGV' in subd:
153 pv = subd['PV']
154 pkgv = subd['PKGV']
155 reppv = pkgv.replace('-', '+')
156 ver = ver.replace(pv, reppv).replace(pkgv, reppv)
157 if 'PKGR' in subd:
158 # Make sure PKGR, rather than PR, is used in ver
159 pr = '-' + subd['PR']
160 pkgr = '-' + subd['PKGR']
161 if pkgr not in ver:
162 ver = ver.replace(pr, pkgr)
163 verlist.append(ver)
164 else:
165 verlist.append(ver)
166 newdeps_dict[dep] = verlist
167 depends = bb.utils.join_deps(newdeps_dict)
168 d.setVar(varname, depends.strip())
169
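A self-contained sketch of the '-' to '+' translation described above; the version strings are made up and the real code reads PKGV/PKGR from the dependent recipe's packagedata:

def translate_ver_example(ver, dep_pkgv):
    # Replace '-' inside the dependency's version with '+' so the remaining
    # '-' separates version and release, as RPM requires.
    reppv = dep_pkgv.replace('-', '+')
    return ver.replace(dep_pkgv, reppv)

# Constraint ">= 1.2-rc1-r0" where the dependent recipe's PKGV is "1.2-rc1":
print(translate_ver_example(">= 1.2-rc1-r0", "1.2-rc1"))  # >= 1.2+rc1-r0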
170 # We need to change the dependency style from BitBake to RPM.
171 # This needs to happen AFTER the mapping_rename_hook
172 def print_deps(variable, tag, array, d):
173 depends = variable
174 if depends:
175 depends_dict = bb.utils.explode_dep_versions2(depends)
176 for dep in depends_dict:
177 for ver in depends_dict[dep]:
178 ver = ver.replace('(', '')
179 ver = ver.replace(')', '')
180 array.append("%s: %s %s" % (tag, dep, ver))
181 if not len(depends_dict[dep]):
182 array.append("%s: %s" % (tag, dep))
183
184 def walk_files(walkpath, target, conffiles, dirfiles):
185 # We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
186 # when packaging. We just ignore these files which are created in
187 # packages-split/ and not package/
188 # We have the odd situation where the CONTROL/DEBIAN directory can be removed in the middle
189 # of the walk; the isdir() test would then fail and the walk code would assume it's a file,
190 # hence we check for the names in files too.
191 for rootpath, dirs, files in os.walk(walkpath):
192 path = rootpath.replace(walkpath, "")
193 if path.endswith("DEBIAN") or path.endswith("CONTROL"):
194 continue
195 path = path.replace("%", "%%%%%%%%")
196 path = path.replace("[", "?")
197 path = path.replace("]", "?")
198
199 # Treat all symlinks to directories as normal files.
200 # os.walk() lists them as directories.
201 def move_to_files(dir):
202 if os.path.islink(os.path.join(rootpath, dir)):
203 files.append(dir)
204 return True
205 else:
206 return False
207 dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
208
209 # Directory handling can happen in two ways: either DIRFILES is not set at all,
210 # in which case we fall back to the older behaviour of packages owning all their
211 # directories...
212 if dirfiles is None:
213 for dir in dirs:
214 if dir == "CONTROL" or dir == "DEBIAN":
215 continue
216 dir = dir.replace("%", "%%%%%%%%")
217 dir = dir.replace("[", "?")
218 dir = dir.replace("]", "?")
219 # All packages own the directories their files are in...
220 target.append('%dir "' + path + '/' + dir + '"')
221 else:
222 # packages own only empty directories or directories explicitly listed in DIRFILES.
223 # This prevents overlapping security permissions.
224 if path and not files and not dirs:
225 target.append('%dir "' + path + '"')
226 elif path and path in dirfiles:
227 target.append('%dir "' + path + '"')
228
229 for file in files:
230 if file == "CONTROL" or file == "DEBIAN":
231 continue
232 file = file.replace("%", "%%%%%%%%")
233 file = file.replace("[", "?")
234 file = file.replace("]", "?")
235 if conffiles.count(path + '/' + file):
236 target.append('%config "' + path + '/' + file + '"')
237 else:
238 target.append('"' + path + '/' + file + '"')
239
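To illustrate the entries walk_files emits in the two DIRFILES modes, a small hypothetical helper showing the %dir/%config quoting used above (the paths are made up):

def spec_file_entry(path, is_dir=False, is_conf=False):
    # Hypothetical helper mirroring the quoting used by walk_files above.
    entry = '"%s"' % path
    if is_dir:
        return '%dir ' + entry
    if is_conf:
        return '%config ' + entry
    return entry

print(spec_file_entry("/usr/share/doc/foo", is_dir=True))  # %dir "/usr/share/doc/foo"
print(spec_file_entry("/etc/foo.conf", is_conf=True))      # %config "/etc/foo.conf"
print(spec_file_entry("/usr/bin/foo"))                     # "/usr/bin/foo"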
240 # Prevent the prerm/postrm scripts from being run during an upgrade
241 def wrap_uninstall(scriptvar):
242 scr = scriptvar.strip()
243 if scr.startswith("#!"):
244 pos = scr.find("\n") + 1
245 else:
246 pos = 0
247 scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
248 return scr
249
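wrap_uninstall is self-contained enough to exercise on its own; a sketch with a made-up prerm script:

def wrap_uninstall_example(scriptvar):
    # Same logic as wrap_uninstall above: keep a leading '#!' line in place and
    # guard the body so it only runs on full removal ($1 = 0), not on upgrade.
    scr = scriptvar.strip()
    pos = scr.find("\n") + 1 if scr.startswith("#!") else 0
    return scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'

print(wrap_uninstall_example("#!/bin/sh\nrm -f /var/lib/foo/cache"))
# #!/bin/sh
# if [ "$1" = "0" ] ; then
# rm -f /var/lib/foo/cache
# fi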
250 def get_perfile(varname, pkg, d):
251 deps = []
252 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
253 dependsflist = (d.getVar(dependsflist_key) or "")
254 for dfile in dependsflist.split():
255 key = "FILE" + varname + "_" + dfile + "_" + pkg
256 depends = d.getVar(key)
257 if depends:
258 deps.append(depends)
259 return " ".join(deps)
260
261 def append_description(spec_preamble, text):
262 """
263 Add the description to the spec file.
264 """
265 import textwrap
266 dedent_text = textwrap.dedent(text).strip()
267 # Bitbake saves "\n" as "\\n"
268 if '\\n' in dedent_text:
269 for t in dedent_text.split('\\n'):
270 spec_preamble.append(t.strip())
271 else:
272 spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
273
274 packages = d.getVar('PACKAGES')
275 if not packages or packages == '':
276 bb.debug(1, "No packages; nothing to do")
277 return
278
279 pkgdest = d.getVar('PKGDEST')
280 if not pkgdest:
281 bb.fatal("No PKGDEST")
282
283 outspecfile = d.getVar('OUTSPECFILE')
284 if not outspecfile:
285 bb.fatal("No OUTSPECFILE")
286
287 # Construct the SPEC file...
288 srcname = d.getVar('PN')
289 localdata = bb.data.createCopy(d)
290 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
291 srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
292 srcversion = localdata.getVar('PKGV').replace('-', '+')
293 srcrelease = localdata.getVar('PKGR')
294 srcepoch = (localdata.getVar('PKGE') or "")
295 srclicense = localdata.getVar('LICENSE')
296 srcsection = localdata.getVar('SECTION')
297 srcmaintainer = localdata.getVar('MAINTAINER')
298 srchomepage = localdata.getVar('HOMEPAGE')
299 srcdescription = localdata.getVar('DESCRIPTION') or "."
300 srccustomtagschunk = get_package_additional_metadata("rpm", localdata)
301
302 srcdepends = d.getVar('DEPENDS')
303 srcrdepends = ""
304 srcrrecommends = ""
305 srcrsuggests = ""
306 srcrprovides = ""
307 srcrreplaces = ""
308 srcrconflicts = ""
309 srcrobsoletes = ""
310
311 srcrpreinst = []
312 srcrpostinst = []
313 srcrprerm = []
314 srcrpostrm = []
315
316 spec_preamble_top = []
317 spec_preamble_bottom = []
318
319 spec_scriptlets_top = []
320 spec_scriptlets_bottom = []
321
322 spec_files_top = []
323 spec_files_bottom = []
324
325 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
326 extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
327
328 for pkg in packages.split():
329 localdata = bb.data.createCopy(d)
330
331 root = "%s/%s" % (pkgdest, pkg)
332
333 localdata.setVar('ROOT', '')
334 localdata.setVar('ROOT_%s' % pkg, root)
335 pkgname = localdata.getVar('PKG_%s' % pkg)
336 if not pkgname:
337 pkgname = pkg
338 localdata.setVar('PKG', pkgname)
339
340 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
341
342 conffiles = get_conffiles(pkg, d)
343 dirfiles = localdata.getVar('DIRFILES')
344 if dirfiles is not None:
345 dirfiles = dirfiles.split()
346
347 splitname = pkgname
348
349 splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
350 splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
351 splitrelease = (localdata.getVar('PKGR') or "")
352 splitepoch = (localdata.getVar('PKGE') or "")
353 splitlicense = (localdata.getVar('LICENSE') or "")
354 splitsection = (localdata.getVar('SECTION') or "")
355 splitdescription = (localdata.getVar('DESCRIPTION') or ".")
356 splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
357
358 translate_vers('RDEPENDS', localdata)
359 translate_vers('RRECOMMENDS', localdata)
360 translate_vers('RSUGGESTS', localdata)
361 translate_vers('RPROVIDES', localdata)
362 translate_vers('RREPLACES', localdata)
363 translate_vers('RCONFLICTS', localdata)
364
365 # Map the dependencies into their final form
366 mapping_rename_hook(localdata)
367
368 splitrdepends = localdata.getVar('RDEPENDS') or ""
369 splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
370 splitrsuggests = localdata.getVar('RSUGGESTS') or ""
371 splitrprovides = localdata.getVar('RPROVIDES') or ""
372 splitrreplaces = localdata.getVar('RREPLACES') or ""
373 splitrconflicts = localdata.getVar('RCONFLICTS') or ""
374 splitrobsoletes = ""
375
376 splitrpreinst = localdata.getVar('pkg_preinst')
377 splitrpostinst = localdata.getVar('pkg_postinst')
378 splitrprerm = localdata.getVar('pkg_prerm')
379 splitrpostrm = localdata.getVar('pkg_postrm')
380
381
382 if not perfiledeps:
383 # Add in summary of per file dependencies
384 splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
385 splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
386
387 splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
388
389 # Gather special src/first package data
390 if srcname == splitname:
391 archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
392 bb.data.inherits_class('archiver', d)
393 if archiving and srclicense != splitlicense:
394 bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
395
396 srclicense = splitlicense
397 srcrdepends = splitrdepends
398 srcrrecommends = splitrrecommends
399 srcrsuggests = splitrsuggests
400 srcrprovides = splitrprovides
401 srcrreplaces = splitrreplaces
402 srcrconflicts = splitrconflicts
403
404 srcrpreinst = splitrpreinst
405 srcrpostinst = splitrpostinst
406 srcrprerm = splitrprerm
407 srcrpostrm = splitrpostrm
408
409 file_list = []
410 walk_files(root, file_list, conffiles, dirfiles)
411 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
412 bb.note("Not creating empty RPM package for %s" % splitname)
413 else:
414 spec_files_top.append('%files')
415 if extra_pkgdata:
416 package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
417 spec_files_top.append('%defattr(-,-,-,-)')
418 if file_list:
419 bb.note("Creating RPM package for %s" % splitname)
420 spec_files_top.extend(file_list)
421 else:
422 bb.note("Creating empty RPM package for %s" % splitname)
423 spec_files_top.append('')
424 continue
425
426 # Process subpackage data
427 spec_preamble_bottom.append('%%package -n %s' % splitname)
428 spec_preamble_bottom.append('Summary: %s' % splitsummary)
429 if srcversion != splitversion:
430 spec_preamble_bottom.append('Version: %s' % splitversion)
431 if srcrelease != splitrelease:
432 spec_preamble_bottom.append('Release: %s' % splitrelease)
433 if srcepoch != splitepoch:
434 spec_preamble_bottom.append('Epoch: %s' % splitepoch)
435 spec_preamble_bottom.append('License: %s' % splitlicense)
436 spec_preamble_bottom.append('Group: %s' % splitsection)
437
438 if srccustomtagschunk != splitcustomtagschunk:
439 spec_preamble_bottom.append(splitcustomtagschunk)
440
441 # Replaces == Obsoletes && Provides
442 robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
443 rprovides = bb.utils.explode_dep_versions2(splitrprovides)
444 rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
445 for dep in rreplaces:
446 if not dep in robsoletes:
447 robsoletes[dep] = rreplaces[dep]
448 if not dep in rprovides:
449 rprovides[dep] = rreplaces[dep]
450 splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
451 splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
452
453 print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
454 if splitrpreinst:
455 print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
456 if splitrpostinst:
457 print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
458 if splitrprerm:
459 print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
460 if splitrpostrm:
461 print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
462
463 print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
464 print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
465 print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
466 print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
467 print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
468
469 spec_preamble_bottom.append('')
470
471 spec_preamble_bottom.append('%%description -n %s' % splitname)
472 append_description(spec_preamble_bottom, splitdescription)
473
474 spec_preamble_bottom.append('')
475
476 # Now process scriptlets
477 if splitrpreinst:
478 spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
479 spec_scriptlets_bottom.append('# %s - preinst' % splitname)
480 spec_scriptlets_bottom.append(splitrpreinst)
481 spec_scriptlets_bottom.append('')
482 if splitrpostinst:
483 spec_scriptlets_bottom.append('%%post -n %s' % splitname)
484 spec_scriptlets_bottom.append('# %s - postinst' % splitname)
485 spec_scriptlets_bottom.append(splitrpostinst)
486 spec_scriptlets_bottom.append('')
487 if splitrprerm:
488 spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
489 spec_scriptlets_bottom.append('# %s - prerm' % splitname)
490 scriptvar = wrap_uninstall(splitrprerm)
491 spec_scriptlets_bottom.append(scriptvar)
492 spec_scriptlets_bottom.append('')
493 if splitrpostrm:
494 spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
495 spec_scriptlets_bottom.append('# %s - postrm' % splitname)
496 scriptvar = wrap_uninstall(splitrpostrm)
497 spec_scriptlets_bottom.append(scriptvar)
498 spec_scriptlets_bottom.append('')
499
500 # Now process files
501 file_list = []
502 walk_files(root, file_list, conffiles, dirfiles)
503 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
504 bb.note("Not creating empty RPM package for %s" % splitname)
505 else:
506 spec_files_bottom.append('%%files -n %s' % splitname)
507 if extra_pkgdata:
508 package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
509 spec_files_bottom.append('%defattr(-,-,-,-)')
510 if file_list:
511 bb.note("Creating RPM package for %s" % splitname)
512 spec_files_bottom.extend(file_list)
513 else:
514 bb.note("Creating empty RPM package for %s" % splitname)
515 spec_files_bottom.append('')
516
517 del localdata
518
519 add_prep(d,spec_files_bottom)
520 spec_preamble_top.append('Summary: %s' % srcsummary)
521 spec_preamble_top.append('Name: %s' % srcname)
522 spec_preamble_top.append('Version: %s' % srcversion)
523 spec_preamble_top.append('Release: %s' % srcrelease)
524 if srcepoch and srcepoch.strip() != "":
525 spec_preamble_top.append('Epoch: %s' % srcepoch)
526 spec_preamble_top.append('License: %s' % srclicense)
527 spec_preamble_top.append('Group: %s' % srcsection)
528 spec_preamble_top.append('Packager: %s' % srcmaintainer)
529 if srchomepage:
530 spec_preamble_top.append('URL: %s' % srchomepage)
531 if srccustomtagschunk:
532 spec_preamble_top.append(srccustomtagschunk)
533 tail_source(d)
534
535 # Replaces == Obsoletes && Provides
536 robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
537 rprovides = bb.utils.explode_dep_versions2(srcrprovides)
538 rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
539 for dep in rreplaces:
540 if not dep in robsoletes:
541 robsoletes[dep] = rreplaces[dep]
542 if not dep in rprovides:
543 rprovides[dep] = rreplaces[dep]
544 srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
545 srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
546
547 print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
548 print_deps(srcrdepends, "Requires", spec_preamble_top, d)
549 if srcrpreinst:
550 print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
551 if srcrpostinst:
552 print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
553 if srcrprerm:
554 print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
555 if srcrpostrm:
556 print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
557
558 print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
559 print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
560 print_deps(srcrprovides, "Provides", spec_preamble_top, d)
561 print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
562 print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
563
564 spec_preamble_top.append('')
565
566 spec_preamble_top.append('%description')
567 append_description(spec_preamble_top, srcdescription)
568
569 spec_preamble_top.append('')
570
571 if srcrpreinst:
572 spec_scriptlets_top.append('%pre')
573 spec_scriptlets_top.append('# %s - preinst' % srcname)
574 spec_scriptlets_top.append(srcrpreinst)
575 spec_scriptlets_top.append('')
576 if srcrpostinst:
577 spec_scriptlets_top.append('%post')
578 spec_scriptlets_top.append('# %s - postinst' % srcname)
579 spec_scriptlets_top.append(srcrpostinst)
580 spec_scriptlets_top.append('')
581 if srcrprerm:
582 spec_scriptlets_top.append('%preun')
583 spec_scriptlets_top.append('# %s - prerm' % srcname)
584 scriptvar = wrap_uninstall(srcrprerm)
585 spec_scriptlets_top.append(scriptvar)
586 spec_scriptlets_top.append('')
587 if srcrpostrm:
588 spec_scriptlets_top.append('%postun')
589 spec_scriptlets_top.append('# %s - postrm' % srcname)
590 scriptvar = wrap_uninstall(srcrpostrm)
591 spec_scriptlets_top.append(scriptvar)
592 spec_scriptlets_top.append('')
593
594 # Write the SPEC file
595 specfile = open(outspecfile, 'w')
596
597 # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
598 # of the generated spec file
599 external_preamble = d.getVar("RPMSPEC_PREAMBLE")
600 if external_preamble:
601 specfile.write(external_preamble + "\n")
602
603 for line in spec_preamble_top:
604 specfile.write(line + "\n")
605
606 for line in spec_preamble_bottom:
607 specfile.write(line + "\n")
608
609 for line in spec_scriptlets_top:
610 specfile.write(line + "\n")
611
612 for line in spec_scriptlets_bottom:
613 specfile.write(line + "\n")
614
615 for line in spec_files_top:
616 specfile.write(line + "\n")
617
618 for line in spec_files_bottom:
619 specfile.write(line + "\n")
620
621 specfile.close()
622}
623# Otherwise allarch packages may change depending on override configuration
624write_specfile[vardepsexclude] = "OVERRIDES"
625
626# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
627RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
628write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
629
630python do_package_rpm () {
631 workdir = d.getVar('WORKDIR')
632 tmpdir = d.getVar('TMPDIR')
633 pkgd = d.getVar('PKGD')
634 pkgdest = d.getVar('PKGDEST')
635 if not workdir or not pkgd or not tmpdir:
636 bb.error("Variables incorrectly set, unable to package")
637 return
638
639 packages = d.getVar('PACKAGES')
640 if not packages or packages == '':
641 bb.debug(1, "No packages; nothing to do")
642 return
643
644 # Construct the spec file...
645 # If the spec file already exists but has not been stored in
646 # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
647 # so remove it before running rpmbuild.
648 srcname = d.getVar('PN')
649 outspecfile = workdir + "/" + srcname + ".spec"
650 if os.path.isfile(outspecfile):
651 os.remove(outspecfile)
652 d.setVar('OUTSPECFILE', outspecfile)
653 bb.build.exec_func('write_specfile', d)
654
655 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
656 if perfiledeps:
657 outdepends, outprovides = write_rpm_perfiledata(srcname, d)
658
659 # Setup the rpmbuild arguments...
660 rpmbuild = d.getVar('RPMBUILD')
661 targetsys = d.getVar('TARGET_SYS')
662 targetvendor = d.getVar('HOST_VENDOR')
663
664 # Too many places in dnf stack assume that arch-independent packages are "noarch".
665 # Let's not fight against this.
666 package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
667 if package_arch == "all":
668 package_arch = "noarch"
669
670 sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX') or "nativesdk").replace("-", "_")
671 d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
672 pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
673 d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
674 bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
675 pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
676 bb.utils.mkdirhier(pkgwritedir)
677 os.chmod(pkgwritedir, 0o755)
678
679 cmd = rpmbuild
680 cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
681 cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
682 cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
683 cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
684 cmd = cmd + " --define '_use_internal_dependency_generator 0'"
685 cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
686 cmd = cmd + " --define '_build_id_links none'"
687 cmd = cmd + " --define '_binary_payload w6T.xzdio'"
688 cmd = cmd + " --define '_source_payload w6T.xzdio'"
689 cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
690 cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
691 cmd = cmd + " --define '_buildhost reproducible'"
692 cmd = cmd + " --define '__font_provides %{nil}'"
693 if perfiledeps:
694 cmd = cmd + " --define '__find_requires " + outdepends + "'"
695 cmd = cmd + " --define '__find_provides " + outprovides + "'"
696 else:
697 cmd = cmd + " --define '__find_requires %{nil}'"
698 cmd = cmd + " --define '__find_provides %{nil}'"
699 cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
700 cmd = cmd + " --define 'debug_package %{nil}'"
701 cmd = cmd + " --define '_tmppath " + workdir + "'"
702 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
703 cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
704 cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
705 cmdsrpm = cmdsrpm + " -bs " + outspecfile
706 # Build the .src.rpm
707 d.setVar('SBUILDSPEC', cmdsrpm + "\n")
708 d.setVarFlag('SBUILDSPEC', 'func', '1')
709 bb.build.exec_func('SBUILDSPEC', d)
710 cmd = cmd + " -bb " + outspecfile
711
712 # rpm 4 creates various empty directories in _topdir, let's clean them up
713 cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)
714
715 # Build the rpm package!
716 d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
717 d.setVarFlag('BUILDSPEC', 'func', '1')
718 bb.build.exec_func('BUILDSPEC', d)
719
720 if d.getVar('RPM_SIGN_PACKAGES') == '1':
721 bb.build.exec_func("sign_rpm", d)
722}
723
724python () {
725 if d.getVar('PACKAGES') != '':
726 deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
727 d.appendVarFlag('do_package_write_rpm', 'depends', deps)
728 d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
729}
730
731SSTATETASKS += "do_package_write_rpm"
732do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
733do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
734# Take a shared lock, we can write multiple packages at the same time...
735# but we need to stop the rootfs/solver from running while we do...
736do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
737
738python do_package_write_rpm_setscene () {
739 sstate_setscene(d)
740}
741addtask do_package_write_rpm_setscene
742
743python do_package_write_rpm () {
744 bb.build.exec_func("read_subpackage_metadata", d)
745 bb.build.exec_func("do_package_rpm", d)
746}
747
748do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
749do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
750do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
751addtask package_write_rpm after do_packagedata do_package
752
753PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
754PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
755
756do_build[recrdeptask] += "do_package_write_rpm"
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
deleted file mode 100644
index d6c1b306fc..0000000000
--- a/meta/classes/package_tar.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "tar"
4
5python do_package_tar () {
6 import subprocess
7
8 oldcwd = os.getcwd()
9
10 workdir = d.getVar('WORKDIR')
11 if not workdir:
12 bb.error("WORKDIR not defined, unable to package")
13 return
14
15 outdir = d.getVar('DEPLOY_DIR_TAR')
16 if not outdir:
17 bb.error("DEPLOY_DIR_TAR not defined, unable to package")
18 return
19
20 dvar = d.getVar('D')
21 if not dvar:
22 bb.error("D not defined, unable to package")
23 return
24
25 packages = d.getVar('PACKAGES')
26 if not packages:
27 bb.debug(1, "PACKAGES not defined, nothing to package")
28 return
29
30 pkgdest = d.getVar('PKGDEST')
31
32 bb.utils.mkdirhier(outdir)
33 bb.utils.mkdirhier(dvar)
34
35 for pkg in packages.split():
36 localdata = bb.data.createCopy(d)
37 root = "%s/%s" % (pkgdest, pkg)
38
39 overrides = localdata.getVar('OVERRIDES', False)
40 localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
41
42 bb.utils.mkdirhier(root)
43 basedir = os.path.dirname(root)
44 tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
45 os.chdir(root)
46 dlist = os.listdir(root)
47 if not dlist:
48 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
49 continue
50 args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
51 ret = subprocess.call(args + [tarfn] + dlist)
52 if ret != 0:
53 bb.error("Creation of tar %s failed." % tarfn)
54
55 os.chdir(oldcwd)
56}
57
58python () {
59 if d.getVar('PACKAGES') != '':
60 deps = ' tar-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
61 d.appendVarFlag('do_package_write_tar', 'depends', deps)
62 d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
63}
64
65
66python do_package_write_tar () {
67 bb.build.exec_func("read_subpackage_metadata", d)
68 bb.build.exec_func("do_package_tar", d)
69}
70do_package_write_tar[dirs] = "${D}"
71addtask package_write_tar before do_build after do_packagedata do_package
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
deleted file mode 100644
index a903e5cfd2..0000000000
--- a/meta/classes/packagedata.bbclass
+++ /dev/null
@@ -1,34 +0,0 @@
1python read_subpackage_metadata () {
2 import oe.packagedata
3
4 vars = {
5 "PN" : d.getVar('PN'),
6 "PE" : d.getVar('PE'),
7 "PV" : d.getVar('PV'),
8 "PR" : d.getVar('PR'),
9 }
10
11 data = oe.packagedata.read_pkgdata(vars["PN"], d)
12
13 for key in data.keys():
14 d.setVar(key, data[key])
15
16 for pkg in d.getVar('PACKAGES').split():
17 sdata = oe.packagedata.read_subpkgdata(pkg, d)
18 for key in sdata.keys():
19 if key in vars:
20 if sdata[key] != vars[key]:
21 if key == "PN":
22 bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
23 bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
24 continue
25 #
26 # If we set unsuffixed variables here there is a chance they could clobber override versions
27 # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION_<pkgname>.
28 # We therefore avoid clobbering for the unsuffixed variable versions.
29 #
30 if key.endswith("_" + pkg):
31 d.setVar(key, sdata[key])
32 else:
33 d.setVar(key, sdata[key], parsing=True)
34}
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
deleted file mode 100644
index 1541c8fbff..0000000000
--- a/meta/classes/packagegroup.bbclass
+++ /dev/null
@@ -1,61 +0,0 @@
1# Class for packagegroup (package group) recipes
2
3# By default, only the packagegroup package itself is in PACKAGES.
4# -dbg and -dev flavours are handled by the anonfunc below.
5# This means that packagegroup recipes used to build multiple packagegroup
6# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
7PACKAGES = "${PN}"
8
9# By default, packagegroup packages do not depend on a certain architecture.
10# Only if dependencies are modified by MACHINE_FEATURES does PACKAGE_ARCH
11# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass.
12PACKAGE_ARCH ?= "all"
13
14# Fully expanded - so it applies the overrides as well
15PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
16
17LICENSE ?= "MIT"
18
19inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
20
21# This automatically adds -dbg and -dev flavours of all PACKAGES
22# to the list. Their dependencies (RRECOMMENDS) are handled as usual
23# by package_depchains in a following step.
24# Also mark all packages as ALLOW_EMPTY
25python () {
26 packages = d.getVar('PACKAGES').split()
27 if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY') != '1':
28 types = ['', '-dbg', '-dev']
29 if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
30 types.append('-ptest')
31 packages = [pkg + suffix for pkg in packages
32 for suffix in types]
33 d.setVar('PACKAGES', ' '.join(packages))
34 for pkg in packages:
35 d.setVar('ALLOW_EMPTY_%s' % pkg, '1')
36}
37
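A standalone sketch of the PACKAGES expansion performed by the anonymous function above (the packagegroup name is made up):

packages = ["packagegroup-core-tools"]
types = ['', '-dbg', '-dev', '-ptest']  # '-ptest' only when the ptest DISTRO_FEATURE is enabled
expanded = [pkg + suffix for pkg in packages for suffix in types]
print(expanded)
# ['packagegroup-core-tools', 'packagegroup-core-tools-dbg',
#  'packagegroup-core-tools-dev', 'packagegroup-core-tools-ptest']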
38# We don't want to look at shared library dependencies for the
39# dbg packages
40DEPCHAIN_DBGDEFAULTDEPS = "1"
41
42# We only need the packaging tasks - disable the rest
43deltask do_fetch
44deltask do_unpack
45deltask do_patch
46deltask do_configure
47deltask do_compile
48deltask do_install
49deltask do_populate_sysroot
50
51INHIBIT_DEFAULT_DEPS = "1"
52
53python () {
54 if bb.data.inherits_class('nativesdk', d):
55 return
56 initman = d.getVar("VIRTUAL-RUNTIME_init_manager")
57 if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
58 bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
59}
60
61CVE_PRODUCT = ""
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
deleted file mode 100644
index cd491a563d..0000000000
--- a/meta/classes/patch.bbclass
+++ /dev/null
@@ -1,166 +0,0 @@
1# Copyright (C) 2006 OpenedHand LTD
2
3# Point to an empty file so any user's custom settings don't break things
4QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
5
6PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
7
8# There is a bug in patch 2.7.3 and earlier where index lines
9# in patches can change file modes when they shouldn't:
10# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
11# This leaks into debug sources in particular. Add the dependency
12# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
13PATCHDEPENDENCY_append_class-target = " patch-replacement-native:do_populate_sysroot"
14
15PATCH_GIT_USER_NAME ?= "OpenEmbedded"
16PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
17
18inherit terminal
19
20python () {
21 if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
22 extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
23 try:
24 extratasks.remove('do_unpack')
25 except ValueError:
26 # For some recipes do_unpack doesn't exist, ignore it
27 pass
28
29 d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
30 for task in extratasks:
31 d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
32}
33
34python patch_task_patch_prefunc() {
35 # Prefunc for do_patch
36 srcsubdir = d.getVar('S')
37
38 workdir = os.path.abspath(d.getVar('WORKDIR'))
39 testsrcdir = os.path.abspath(srcsubdir)
40 if (testsrcdir + os.sep).startswith(workdir + os.sep):
41 # Double-check that either workdir or S or some directory in-between is a git repository
42 found = False
43 while testsrcdir != workdir:
44 if os.path.exists(os.path.join(testsrcdir, '.git')):
45 found = True
46 break
47 if testsrcdir == workdir:
48 break
49 testsrcdir = os.path.dirname(testsrcdir)
50 if not found:
51 bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')
52
53 patchdir = os.path.join(srcsubdir, 'patches')
54 if os.path.exists(patchdir):
55 if os.listdir(patchdir):
56 d.setVar('PATCH_HAS_PATCHES_DIR', '1')
57 else:
58 os.rmdir(patchdir)
59}
60
61python patch_task_postfunc() {
62 # Postfunc for task functions between do_unpack and do_patch
63 import oe.patch
64 import shutil
65 func = d.getVar('BB_RUNTASK')
66 srcsubdir = d.getVar('S')
67
68 if os.path.exists(srcsubdir):
69 if func == 'do_patch':
70 haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
71 patchdir = os.path.join(srcsubdir, 'patches')
72 if os.path.exists(patchdir):
73 shutil.rmtree(patchdir)
74 if haspatches:
75 stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
76 if stdout:
77 bb.process.run('git checkout patches', cwd=srcsubdir)
78 stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
79 if stdout:
80 useroptions = []
81 oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
82 bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(useroptions), func, oe.patch.GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
83}
84
85def src_patches(d, all=False, expand=True):
86 import oe.patch
87 return oe.patch.src_patches(d, all, expand)
88
89def should_apply(parm, d):
90 """Determine if we should apply the given patch"""
91 import oe.patch
92 return oe.patch.should_apply(parm, d)
93
94should_apply[vardepsexclude] = "DATE SRCDATE"
95
96python patch_do_patch() {
97 import oe.patch
98
99 patchsetmap = {
100 "patch": oe.patch.PatchTree,
101 "quilt": oe.patch.QuiltTree,
102 "git": oe.patch.GitApplyTree,
103 }
104
105 cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']
106
107 resolvermap = {
108 "noop": oe.patch.NOOPResolver,
109 "user": oe.patch.UserResolver,
110 }
111
112 rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']
113
114 classes = {}
115
116 s = d.getVar('S')
117
118 os.putenv('PATH', d.getVar('PATH'))
119
120 # We must use one TMPDIR per process so that the "patch" processes
121 # don't generate the same temp file name.
122
123 import tempfile
124 process_tmpdir = tempfile.mkdtemp()
125 os.environ['TMPDIR'] = process_tmpdir
126
127 for patch in src_patches(d):
128 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
129
130 if "patchdir" in parm:
131 patchdir = parm["patchdir"]
132 if not os.path.isabs(patchdir):
133 patchdir = os.path.join(s, patchdir)
134 else:
135 patchdir = s
136
137 if not patchdir in classes:
138 patchset = cls(patchdir, d)
139 resolver = rcls(patchset, oe_terminal)
140 classes[patchdir] = (patchset, resolver)
141 patchset.Clean()
142 else:
143 patchset, resolver = classes[patchdir]
144
145 bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
146 try:
147 patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
148 except Exception as exc:
149 bb.utils.remove(process_tmpdir, True)
150 bb.fatal(str(exc))
151 try:
152 resolver.Resolve()
153 except bb.BBHandledException as e:
154 bb.utils.remove(process_tmpdir, True)
155 bb.fatal(str(e))
156
157 bb.utils.remove(process_tmpdir, True)
158 del os.environ['TMPDIR']
159}
160patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
161
162addtask patch after do_unpack
163do_patch[dirs] = "${WORKDIR}"
164do_patch[depends] = "${PATCHDEPENDENCY}"
165
166EXPORT_FUNCTIONS do_patch
diff --git a/meta/classes/perl-version.bbclass b/meta/classes/perl-version.bbclass
deleted file mode 100644
index 84b67b8180..0000000000
--- a/meta/classes/perl-version.bbclass
+++ /dev/null
@@ -1,66 +0,0 @@
1PERL_OWN_DIR = ""
2
3# Determine the staged version of perl from the perl configuration file
4# Assign vardepvalue, because otherwise signature is changed before and after
5# perl is built (from None to real version in config.sh).
6get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
7def get_perl_version(d):
8 import re
9 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
10 try:
11 f = open(cfg, 'r')
12 except IOError:
13 return None
14 l = f.readlines()
15 f.close()
16 r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
17 for s in l:
18 m = r.match(s)
19 if m:
20 return m.group(1)
21 return None
22
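The version detection above boils down to a single regex over config.sh; a standalone sketch with a made-up config line:

import re

def parse_perl_version(lines):
    # Same pattern as get_perl_version above.
    r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
    for s in lines:
        m = r.match(s)
        if m:
            return m.group(1)
    return None

print(parse_perl_version(["osname='linux'\n", "version='5.34.0'\n"]))  # 5.34.0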
23PERLVERSION := "${@get_perl_version(d)}"
24PERLVERSION[vardepvalue] = ""
25
26
27# Determine the staged arch of perl from the perl configuration file
28# Assign vardepvalue, because otherwise signature is changed before and after
29# perl is built (from None to real version in config.sh).
30def get_perl_arch(d):
31 import re
32 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
33 try:
34 f = open(cfg, 'r')
35 except IOError:
36 return None
37 l = f.readlines()
38 f.close()
39 r = re.compile("^archname='([^']*)'")
40 for s in l:
41 m = r.match(s)
42 if m:
43 return m.group(1)
44 return None
45
46PERLARCH := "${@get_perl_arch(d)}"
47PERLARCH[vardepvalue] = ""
48
49# Determine the staged arch of perl-native from the perl configuration file
50# Assign vardepvalue, because otherwise signature is changed before and after
51# perl is built (from None to real version in config.sh).
52def get_perl_hostarch(d):
53 import re
54 cfg = d.expand('${STAGING_LIBDIR_NATIVE}/perl5/config.sh')
55 try:
56 f = open(cfg, 'r')
57 except IOError:
58 return None
59 l = f.readlines()
60 f.close()
61 r = re.compile("^archname='([^']*)'")
62 for s in l:
63 m = r.match(s)
64 if m:
65 return m.group(1)
66 return None
diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass
deleted file mode 100644
index cc8de8b381..0000000000
--- a/meta/classes/perlnative.bbclass
+++ /dev/null
@@ -1,3 +0,0 @@
1EXTRANATIVEPATH += "perl-native"
2DEPENDS += "perl-native"
3OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
deleted file mode 100644
index b07f51ed56..0000000000
--- a/meta/classes/pixbufcache.bbclass
+++ /dev/null
@@ -1,63 +0,0 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for pixbuf
3# packages.
4#
5
6DEPENDS_append_class-target = " qemu-native"
7inherit qemu
8
9PIXBUF_PACKAGES ??= "${PN}"
10
11PACKAGE_WRITE_DEPS += "qemu-native gdk-pixbuf-native"
12
13pixbufcache_common() {
14if [ "x$D" != "x" ]; then
15 $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} libdir=${libdir} \
16 bindir=${bindir} base_libdir=${base_libdir}
17else
18
19 # Update the pixbuf loaders in case they haven't been registered yet
20 ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
21
22 if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
23 for icondir in /usr/share/icons/*; do
24 if [ -d ${icondir} ]; then
25 gtk-update-icon-cache -t -q ${icondir}
26 fi
27 done
28 fi
29fi
30}
31
32python populate_packages_append() {
33 pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
34
35 for pkg in pixbuf_pkgs:
36 bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
37 postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
38 if not postinst:
39 postinst = '#!/bin/sh\n'
40 postinst += d.getVar('pixbufcache_common')
41 d.setVar('pkg_postinst_%s' % pkg, postinst)
42
43 postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
44 if not postrm:
45 postrm = '#!/bin/sh\n'
46 postrm += d.getVar('pixbufcache_common')
47 d.setVar('pkg_postrm_%s' % pkg, postrm)
48}
49
50gdkpixbuf_complete() {
51GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
52}
53
54DEPENDS_append_class-native = " gdk-pixbuf-native"
55SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
56
57pixbufcache_sstate_postinst() {
58 mkdir -p ${SYSROOT_DESTDIR}${bindir}
59 dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
60 echo '#!/bin/sh' > $dest
61 echo "${gdkpixbuf_complete}" >> $dest
62 chmod 0755 $dest
63}
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
deleted file mode 100644
index ad1f84f506..0000000000
--- a/meta/classes/pkgconfig.bbclass
+++ /dev/null
@@ -1,2 +0,0 @@
1DEPENDS_prepend = "pkgconfig-native "
2
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
deleted file mode 100644
index f64a911b72..0000000000
--- a/meta/classes/populate_sdk.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
1# The majority of populate_sdk is located in populate_sdk_base
2# This chunk simply facilitates compatibility with SDK only recipes.
3
4inherit populate_sdk_base
5
6addtask populate_sdk after do_install before do_build
7
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
deleted file mode 100644
index c8a7084d33..0000000000
--- a/meta/classes/populate_sdk_base.bbclass
+++ /dev/null
@@ -1,340 +0,0 @@
1inherit meta image-postinst-intercepts image-artifact-names
2
3# Wildcards specifying complementary packages to install for every package that has been explicitly
4# installed into the rootfs
5COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
6COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
7COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
8COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
9COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
10COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
11COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
12
13def complementary_globs(featurevar, d):
14 all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
15 globs = []
16 features = set((d.getVar(featurevar) or '').split())
17 for name, glob in all_globs.items():
18 if name in features:
19 globs.append(glob)
20 return ' '.join(globs)
21
22SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
23SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
24SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
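# A minimal worked example of the expansion above (values assumed for
# illustration): with SDKIMAGE_FEATURES = "dev-pkgs dbg-pkgs",
# complementary_globs() returns the matching COMPLEMENTARY_GLOB flags, so
# SDKIMAGE_INSTALL_COMPLEMENTARY effectively expands to:
#
#   SDKIMAGE_INSTALL_COMPLEMENTARY = "*-dev *-dbg"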
25
26PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
27SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
28
29# List of locales to install, or "all" for all of them, or unset for none.
30SDKIMAGE_LINGUAS ?= "all"
31
32inherit rootfs_${IMAGE_PKGTYPE}
33
34SDK_DIR = "${WORKDIR}/sdk"
35SDK_OUTPUT = "${SDK_DIR}/image"
36SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
37
38SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
39
40B_task-populate-sdk = "${SDK_DIR}"
41
42SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
43
44TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
45TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
46TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} target-sdk-provides-dummy"
47TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
48TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
49
50# Default suffix for the archived SDK
51SDK_ARCHIVE_TYPE ?= "tar.xz"
52SDK_XZ_COMPRESSION_LEVEL ?= "-9"
53SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
54
55# Support different SDK archive types according to SDK_ARCHIVE_TYPE; zip and tar.xz are currently supported
56python () {
57 if d.getVar('SDK_ARCHIVE_TYPE') == 'zip':
58 d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
59 # SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
60        # It is recommended to cd into the input dir first to avoid including the build path in the archive
61 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
62 else:
63 d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
64 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
65}
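# A minimal configuration sketch (assumed local.conf usage, for illustration
# only): selecting the zip format switches both the archive dependency and the
# archive command set by the anonymous python above.
#
#   SDK_ARCHIVE_TYPE = "zip"
#
# Any other value falls back to the default tar.xz behaviour.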
66
67SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
68SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
69PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
70SDK_DEPENDS += "nativesdk-glibc-locale"
71
72# We want REAL_MULTIMACH_TARGET_SYS to point to TUNE_PKGARCH, not PACKAGE_ARCH, as the
73# latter could be set to MACHINE_ARCH
74REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
75
76PID = "${@os.getpid()}"
77
78EXCLUDE_FROM_WORLD = "1"
79
80SDK_PACKAGING_FUNC ?= "create_shar"
81SDK_PRE_INSTALL_COMMAND ?= ""
82SDK_POST_INSTALL_COMMAND ?= ""
83SDK_RELOCATE_AFTER_INSTALL ?= "1"
84
85SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk"
86SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
87
88SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
89SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
90SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
91SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
92
93python write_target_sdk_manifest () {
94 from oe.sdk import sdk_list_installed_packages
95 from oe.utils import format_pkg_list
96 sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST"))
97 pkgs = sdk_list_installed_packages(d, True)
98 if not os.path.exists(sdkmanifestdir):
99 bb.utils.mkdirhier(sdkmanifestdir)
100 with open(d.getVar('SDK_TARGET_MANIFEST'), 'w') as output:
101 output.write(format_pkg_list(pkgs, 'ver'))
102}
103
104python write_sdk_test_data() {
105 from oe.data import export2json
106 testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
107 bb.utils.mkdirhier(os.path.dirname(testdata))
108 export2json(d, testdata)
109}
110
111python write_host_sdk_manifest () {
112 from oe.sdk import sdk_list_installed_packages
113 from oe.utils import format_pkg_list
114 sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST"))
115 pkgs = sdk_list_installed_packages(d, False)
116 if not os.path.exists(sdkmanifestdir):
117 bb.utils.mkdirhier(sdkmanifestdir)
118 with open(d.getVar('SDK_HOST_MANIFEST'), 'w') as output:
119 output.write(format_pkg_list(pkgs, 'ver'))
120}
121
122POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
123POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
124POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
125SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
126SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
127
128def populate_sdk_common(d):
129 from oe.sdk import populate_sdk
130 from oe.manifest import create_manifest, Manifest
131
132 # Handle package exclusions
133 excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split()
134 inst_pkgs = (d.getVar("PACKAGE_INSTALL") or "").split()
135 inst_attempt_pkgs = (d.getVar("PACKAGE_INSTALL_ATTEMPTONLY") or "").split()
136
137 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
138 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
139
140 for pkg in excl_pkgs:
141 if pkg in inst_pkgs:
142 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
143 inst_pkgs.remove(pkg)
144
145 if pkg in inst_attempt_pkgs:
146            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs))
147 inst_attempt_pkgs.remove(pkg)
148
149 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
150 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
151
152 pn = d.getVar('PN')
153 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
154 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
155
156 ld = bb.data.createCopy(d)
157 ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
158 runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
159 runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
160 d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK"))
161 d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY"))
162
163 # create target/host SDK manifests
164 create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
165 manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
166 create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
167 manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
168
169 populate_sdk(d)
170
171fakeroot python do_populate_sdk() {
172 populate_sdk_common(d)
173}
174SSTATETASKS += "do_populate_sdk"
175SSTATE_SKIP_CREATION_task-populate-sdk = '1'
176do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
177do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
178do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
179do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
180
181PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
182
183fakeroot create_sdk_files() {
184 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
185
186 # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
187 # Escape special characters like '+' and '.' in the SDKPATH
188 escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
189 sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
190
191 mkdir -p ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/
192 echo '${SDKPATHNATIVE}${libdir_nativesdk}
193${SDKPATHNATIVE}${base_libdir_nativesdk}
194include /etc/ld.so.conf' > ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ld.so.conf
195}
196
197python check_sdk_sysroots() {
198 # Fails build if there are broken or dangling symlinks in SDK sysroots
199
200 if d.getVar('CHECK_SDK_SYSROOTS') != '1':
201 # disabled, bail out
202 return
203
204 def norm_path(path):
205 return os.path.abspath(path)
206
207 # Get scan root
208 SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT'),
209 d.getVar('SDKPATH')))
210
211 bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
212
213 def check_symlink(linkPath):
214 if not os.path.islink(linkPath):
215 return
216
217 linkDirPath = os.path.dirname(linkPath)
218
219 targetPath = os.readlink(linkPath)
220 if not os.path.isabs(targetPath):
221 targetPath = os.path.join(linkDirPath, targetPath)
222 targetPath = norm_path(targetPath)
223
224 if SCAN_ROOT != os.path.commonprefix( [SCAN_ROOT, targetPath] ):
225 bb.error("Escaping symlink {0!s} --> {1!s}".format(linkPath, targetPath))
226 return
227
228 if not os.path.exists(targetPath):
229 bb.error("Broken symlink {0!s} --> {1!s}".format(linkPath, targetPath))
230 return
231
232 if os.path.isdir(targetPath):
233 dir_walk(targetPath)
234
235 def walk_error_handler(e):
236 bb.error(str(e))
237
238 def dir_walk(rootDir):
239 for dirPath,subDirEntries,fileEntries in os.walk(rootDir, followlinks=False, onerror=walk_error_handler):
240 entries = subDirEntries + fileEntries
241 for e in entries:
242 ePath = os.path.join(dirPath, e)
243 check_symlink(ePath)
244
245 # start
246 dir_walk(SCAN_ROOT)
247}
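# A minimal sketch of enabling the check above, e.g. from local.conf (assumed
# usage, for illustration only); the symlink scan only runs when this variable
# is set to "1":
#
#   CHECK_SDK_SYSROOTS = "1"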
248
249SDKTAROPTS = "--owner=root --group=root"
250
251fakeroot archive_sdk() {
252 # Package it up
253 mkdir -p ${SDKDEPLOYDIR}
254 ${SDK_ARCHIVE_CMD}
255}
256
257TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
258TOOLCHAIN_SHAR_REL_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-relocate.sh"
259
260fakeroot create_shar() {
261 # copy in the template shar extractor script
262 cp ${TOOLCHAIN_SHAR_EXT_TMPL} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
263
264 rm -f ${T}/pre_install_command ${T}/post_install_command
265
266 if [ "${SDK_RELOCATE_AFTER_INSTALL}" = "1" ] ; then
267 cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
268 fi
269 cat << "EOF" >> ${T}/pre_install_command
270${SDK_PRE_INSTALL_COMMAND}
271EOF
272
273 cat << "EOF" >> ${T}/post_install_command
274${SDK_POST_INSTALL_COMMAND}
275EOF
276 sed -i -e '/@SDK_PRE_INSTALL_COMMAND@/r ${T}/pre_install_command' \
277 -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' \
278 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
279
280 # substitute variables
281 sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
282 -e 's#@SDKPATH@#${SDKPATH}#g' \
283 -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
284 -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
285 -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
286 -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\\&')}#g' \
287 -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
288 -e '/@SDK_PRE_INSTALL_COMMAND@/d' \
289 -e '/@SDK_POST_INSTALL_COMMAND@/d' \
290 -e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
291 -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
292 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
293
294 # add execution permission
295 chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
296
297 # append the SDK tarball
298 cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
299
300 # delete the old tarball, we don't need it anymore
301 rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}
302}
303
304populate_sdk_log_check() {
305 for target in $*
306 do
307 lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
308
309 echo "log_check: Using $lf_path as logfile"
310
311 if [ -e "$lf_path" ]; then
312 ${IMAGE_PKGTYPE}_log_check $target $lf_path
313 else
314 echo "Cannot find logfile [$lf_path]"
315 fi
316 echo "Logfile is clean"
317 done
318}
319
320def sdk_command_variables(d):
321 return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_PRE_TARGET_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS']
322
323def sdk_variables(d):
324 variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
325 'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
326 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'IMAGE_INSTALL_DEBUGFS']
327 variables.extend(sdk_command_variables(d))
328 return " ".join(variables)
329
330do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
331
332do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
333 ${TOOLCHAIN_SHAR_EXT_TMPL}:True"
334
335do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
336do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
337do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
338do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
339do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
340addtask populate_sdk
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
deleted file mode 100644
index e6bf27cf38..0000000000
--- a/meta/classes/populate_sdk_ext.bbclass
+++ /dev/null
@@ -1,796 +0,0 @@
1# Extensible SDK
2
3inherit populate_sdk_base
4
5# NOTE: normally you cannot use task overrides for this kind of thing - this
6# only works because of get_sdk_ext_rdepends()
7
8TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
9 meta-environment-extsdk-${MACHINE} \
10 "
11
12TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
13
14SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
15
16SDK_EXT = ""
17SDK_EXT_task-populate-sdk-ext = "-ext"
18
19# Options are full or minimal
20SDK_EXT_TYPE ?= "full"
21SDK_INCLUDE_PKGDATA ?= "0"
22SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
23SDK_INCLUDE_NATIVESDK ?= "0"
24SDK_INCLUDE_BUILDTOOLS ?= '1'
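# A minimal configuration sketch (assumed local.conf values, for illustration
# only): a smaller eSDK that still ships the cross-toolchain sstate could be
# requested with:
#
#   SDK_EXT_TYPE = "minimal"
#   SDK_INCLUDE_TOOLCHAIN = "1"
#   SDK_INCLUDE_PKGDATA = "0"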
25
26SDK_RECRDEP_TASKS ?= ""
27SDK_CUSTOM_TEMPLATECONF ?= "0"
28
29SDK_LOCAL_CONF_WHITELIST ?= ""
30SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
31 BB_NUMBER_THREADS \
32 BB_NUMBER_PARSE_THREADS \
33 PARALLEL_MAKE \
34 PRSERV_HOST \
35 SSTATE_MIRRORS \
36 DL_DIR \
37 SSTATE_DIR \
38 TMPDIR \
39 BB_SERVER_TIMEOUT \
40 "
41SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
42SDK_UPDATE_URL ?= ""
43
44SDK_TARGETS ?= "${PN}"
45
46def get_sdk_install_targets(d, images_only=False):
47 sdk_install_targets = ''
48 if images_only or d.getVar('SDK_EXT_TYPE') != 'minimal':
49 sdk_install_targets = d.getVar('SDK_TARGETS')
50
51 depd = d.getVar('BB_TASKDEPDATA', False)
52 tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
53 tasklist.remove('do_build')
54 for v in depd.values():
55 if v[1] in tasklist:
56 if v[0] not in sdk_install_targets:
57 sdk_install_targets += ' {}'.format(v[0])
58
59 if not images_only:
60 if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
61 sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
62 if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1':
63 sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot'
64
65 return sdk_install_targets
66
67get_sdk_install_targets[vardepsexclude] = "BB_TASKDEPDATA"
68
69OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
70
71# The files from COREBASE that should be preserved in the copy of COREBASE
72# placed into the SDK. This allows custom setup scripts in COREBASE, as well
73# as untracked files, to be preserved.
74COREBASE_FILES ?= " \
75 oe-init-build-env \
76 scripts \
77 LICENSE \
78 .templateconf \
79"
80
81SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
82B_task-populate-sdk-ext = "${SDK_DIR}"
83TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
84TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
85
86SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
87SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
88
89python write_target_sdk_ext_manifest () {
90 from oe.sdk import get_extra_sdkinfo
91 sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
92 extra_info = get_extra_sdkinfo(sstate_dir)
93
94 target = d.getVar('TARGET_SYS')
95 target_multimach = d.getVar('MULTIMACH_TARGET_SYS')
96 real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
97
98 pkgs = {}
99 os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True)
100 with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
101 for fn in extra_info['filesizes']:
102 info = fn.split(':')
103 if info[2] in (target, target_multimach, real_target_multimach) \
104 or info[5] == 'allarch':
105 if not info[1] in pkgs:
106 f.write("%s %s %s\n" % (info[1], info[2], info[3]))
107 pkgs[info[1]] = {}
108}
109python write_host_sdk_ext_manifest () {
110 from oe.sdk import get_extra_sdkinfo
111 sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
112 extra_info = get_extra_sdkinfo(sstate_dir)
113 host = d.getVar('BUILD_SYS')
114 with open(d.getVar('SDK_EXT_HOST_MANIFEST'), 'w') as f:
115 for fn in extra_info['filesizes']:
116 info = fn.split(':')
117 if info[2] == host:
118 f.write("%s %s %s\n" % (info[1], info[2], info[3]))
119}
120
121SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
122
123SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
124
125def clean_esdk_builddir(d, sdkbasepath):
126 """Clean up traces of the fake build for create_filtered_tasklist()"""
127 import shutil
128 cleanpaths = ['cache', 'tmp']
129 for pth in cleanpaths:
130 fullpth = os.path.join(sdkbasepath, pth)
131 if os.path.isdir(fullpth):
132 shutil.rmtree(fullpth)
133 elif os.path.isfile(fullpth):
134 os.remove(fullpth)
135
136def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
137 """
138 Create a filtered list of tasks. Also double-checks that the build system
139 within the SDK basically works and required sstate artifacts are available.
140 """
141 import tempfile
142 import shutil
143 import oe.copy_buildsystem
144
145 # Create a temporary build directory that we can pass to the env setup script
146 shutil.copyfile(sdkbasepath + '/conf/local.conf', sdkbasepath + '/conf/local.conf.bak')
147 try:
148 with open(sdkbasepath + '/conf/local.conf', 'a') as f:
149 # Force the use of sstate from the build system
150 f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
151 f.write('SSTATE_MIRRORS_forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
152 # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
153 f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
154 f.write('TCLIBCAPPEND_forcevariable = ""\n')
155 # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
156 # be different and we won't be able to find our native sstate)
157 if not bb.data.inherits_class('uninative', d):
158 f.write('INHERIT_remove = "uninative"\n')
159
160 # Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
161 # will not allow in its COREBASE path, so we need to rename the directory temporarily
162 temp_sdkbasepath = d.getVar('SDK_OUTPUT') + '/tmp-renamed-sdk'
163 # Delete any existing temp dir
164 try:
165 shutil.rmtree(temp_sdkbasepath)
166 except FileNotFoundError:
167 pass
168 os.rename(sdkbasepath, temp_sdkbasepath)
169 cmdprefix = '. %s .; ' % conf_initpath
170 logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
171 try:
172 oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
173 except bb.process.ExecutionError as e:
174 msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
175 if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
176 msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
177 bb.fatal(msg)
178 os.rename(temp_sdkbasepath, sdkbasepath)
179 # Clean out residue of running bitbake, which check_sstate_task_list()
180 # will effectively do
181 clean_esdk_builddir(d, sdkbasepath)
182 finally:
183 localconf = sdkbasepath + '/conf/local.conf'
184 if os.path.exists(localconf + '.bak'):
185 os.replace(localconf + '.bak', localconf)
186
187python copy_buildsystem () {
188 import re
189 import shutil
190 import glob
191 import oe.copy_buildsystem
192
193 oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT')
194
195 conf_bbpath = ''
196 conf_initpath = ''
197 core_meta_subdir = ''
198
199 # Copy in all metadata layers + bitbake (as repositories)
200 buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
201 baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
202
203    # Check if a custom templateconf path is set
204 use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
205
206 # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
207 derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
208 if derivative:
209 workspace_name = 'orig-workspace'
210 else:
211 workspace_name = None
212
213 corebase, sdkbblayers = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
214 conf_bbpath = os.path.join('layers', corebase, 'bitbake')
215
216 for path in os.listdir(baseoutpath + '/layers'):
217 relpath = os.path.join('layers', path, oe_init_env_script)
218 if os.path.exists(os.path.join(baseoutpath, relpath)):
219 conf_initpath = relpath
220
221 relpath = os.path.join('layers', path, 'scripts', 'devtool')
222 if os.path.exists(os.path.join(baseoutpath, relpath)):
223 scriptrelpath = os.path.dirname(relpath)
224
225 relpath = os.path.join('layers', path, 'meta')
226 if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
227 core_meta_subdir = relpath
228
229 d.setVar('oe_init_build_env_path', conf_initpath)
230 d.setVar('scriptrelpath', scriptrelpath)
231
232 # Write out config file for devtool
233 import configparser
234 config = configparser.SafeConfigParser()
235 config.add_section('General')
236 config.set('General', 'bitbake_subdir', conf_bbpath)
237 config.set('General', 'init_path', conf_initpath)
238 config.set('General', 'core_meta_subdir', core_meta_subdir)
239 config.add_section('SDK')
240 config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS'))
241 updateurl = d.getVar('SDK_UPDATE_URL')
242 if updateurl:
243 config.set('SDK', 'updateserver', updateurl)
244 bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
245 with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
246 config.write(f)
247
248 unlockedsigs = os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc')
249 with open(unlockedsigs, 'w') as f:
250 pass
251
252 # Create a layer for new recipes / appends
253 bbpath = d.getVar('BBPATH')
254 bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
255
256 # Create bblayers.conf
257 bb.utils.mkdirhier(baseoutpath + '/conf')
258 with open(baseoutpath + '/conf/bblayers.conf', 'w') as f:
259 f.write('# WARNING: this configuration has been automatically generated and in\n')
260 f.write('# most cases should not be edited. If you need more flexibility than\n')
261 f.write('# this configuration provides, it is strongly suggested that you set\n')
262 f.write('# up a proper instance of the full build system and use that instead.\n\n')
263
264 # LCONF_VERSION may not be set, for example when using meta-poky
265 # so don't error if it isn't found
266 lconf_version = d.getVar('LCONF_VERSION', False)
267 if lconf_version is not None:
268 f.write('LCONF_VERSION = "%s"\n\n' % lconf_version)
269
270 f.write('BBPATH = "$' + '{TOPDIR}"\n')
271 f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
272 f.write('BBLAYERS := " \\\n')
273 for layerrelpath in sdkbblayers:
274 f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath)
275 f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
276 f.write(' "\n')
277
278 # Copy uninative tarball
279 # For now this is where uninative.bbclass expects the tarball
280 if bb.data.inherits_class('uninative', d):
281 uninative_file = d.expand('${UNINATIVE_DLDIR}/' + d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH")) + '/${UNINATIVE_TARBALL}')
282 uninative_checksum = bb.utils.sha256_file(uninative_file)
283 uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
284 bb.utils.mkdirhier(uninative_outdir)
285 shutil.copy(uninative_file, uninative_outdir)
286
287 env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE') or '').split()
288 env_whitelist_values = {}
289
290 # Create local.conf
291 builddir = d.getVar('TOPDIR')
292 if derivative and os.path.exists(builddir + '/conf/site.conf'):
293 shutil.copyfile(builddir + '/conf/site.conf', baseoutpath + '/conf/site.conf')
294 if derivative and os.path.exists(builddir + '/conf/auto.conf'):
295 shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
296 if derivative:
297 shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
298 else:
299 local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST') or '').split()
300 local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST') or '').split()
301 def handle_var(varname, origvalue, op, newlines):
302 if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
303 newlines.append('# Removed original setting of %s\n' % varname)
304 return None, op, 0, True
305 else:
306 if varname in env_whitelist:
307 env_whitelist_values[varname] = origvalue
308 return origvalue, op, 0, True
309 varlist = ['[^#=+ ]*']
310 oldlines = []
311 if os.path.exists(builddir + '/conf/site.conf'):
312 with open(builddir + '/conf/site.conf', 'r') as f:
313 oldlines += f.readlines()
314 if os.path.exists(builddir + '/conf/auto.conf'):
315 with open(builddir + '/conf/auto.conf', 'r') as f:
316 oldlines += f.readlines()
317 if os.path.exists(builddir + '/conf/local.conf'):
318 with open(builddir + '/conf/local.conf', 'r') as f:
319 oldlines += f.readlines()
320 (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
321
322 with open(baseoutpath + '/conf/local.conf', 'w') as f:
323 f.write('# WARNING: this configuration has been automatically generated and in\n')
324 f.write('# most cases should not be edited. If you need more flexibility than\n')
325 f.write('# this configuration provides, it is strongly suggested that you set\n')
326 f.write('# up a proper instance of the full build system and use that instead.\n\n')
327 for line in newlines:
328 if line.strip() and not line.startswith('#'):
329 f.write(line)
330 # Write a newline just in case there's none at the end of the original
331 f.write('\n')
332
333 f.write('TMPDIR = "${TOPDIR}/tmp"\n')
334 f.write('TCLIBCAPPEND = ""\n')
335 f.write('DL_DIR = "${TOPDIR}/downloads"\n')
336
337 if bb.data.inherits_class('uninative', d):
338 f.write('INHERIT += "%s"\n' % 'uninative')
339 f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
340 f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
341
342 # Some classes are not suitable for SDK, remove them from INHERIT
343 f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST', False))
344
345 # Bypass the default connectivity check if any
346 f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
347
348 # This warning will come out if reverse dependencies for a task
349 # don't have sstate as well as the task itself. We already know
350 # this will be the case for the extensible sdk, so turn off the
351 # warning.
352 f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n')
353
354 # Warn if the sigs in the locked-signature file don't match
355 # the sig computed from the metadata.
356 f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
357
358 # We want to be able to set this without a full reparse
359 f.write('BB_HASHCONFIG_WHITELIST_append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
360
361 # Set up whitelist for run on install
362 f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
363
364 # Hide the config information from bitbake output (since it's fixed within the SDK)
365 f.write('BUILDCFG_HEADER = ""\n\n')
366
367 f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
368 f.write('WITHIN_EXT_SDK = "1"\n\n')
369
370 # Map gcc-dependent uninative sstate cache for installer usage
371 f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
372
373 # Allow additional config through sdk-extra.conf
374 fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
375 if fn:
376 with open(fn, 'r') as xf:
377 for line in xf:
378 f.write(line)
379
380 # If you define a sdk_extraconf() function then it can contain additional config
381 # (Though this is awkward; sdk-extra.conf should probably be used instead)
382 extraconf = (d.getVar('sdk_extraconf') or '').strip()
383 if extraconf:
384 # Strip off any leading / trailing spaces
385 for line in extraconf.splitlines():
386 f.write(line.strip() + '\n')
387
388 f.write('require conf/locked-sigs.inc\n')
389 f.write('require conf/unlocked-sigs.inc\n')
390
391 if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
392 bb.parse.siggen.save_unitaskhashes()
393 bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
394 shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
395
396 # Use templateconf.cfg file from builddir if exists
397 if os.path.exists(builddir + '/conf/templateconf.cfg') and use_custom_templateconf == '1':
398 shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg')
399 else:
400 # Write a templateconf.cfg
401 with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
402 f.write('meta/conf\n')
403
404 # Ensure any variables set from the external environment (by way of
405 # BB_ENV_EXTRAWHITE) are set in the SDK's configuration
406 extralines = []
407 for name, value in env_whitelist_values.items():
408 actualvalue = d.getVar(name) or ''
409 if value != actualvalue:
410 extralines.append('%s = "%s"\n' % (name, actualvalue))
411 if extralines:
412 with open(baseoutpath + '/conf/local.conf', 'a') as f:
413 f.write('\n')
414 f.write('# Extra settings from environment:\n')
415 for line in extralines:
416 f.write(line)
417 f.write('\n')
418
419 # Filter the locked signatures file to just the sstate tasks we are interested in
420 excluded_targets = get_sdk_install_targets(d, images_only=True)
421 sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
422 lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
423 #nativesdk-only sigfile to merge into locked-sigs.inc
424 sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
425 nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
426 nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
427
428 if sdk_include_nativesdk:
429 oe.copy_buildsystem.prune_lockedsigs([],
430 excluded_targets.split(),
431 nativesigfile,
432 True,
433 nativesigfile_pruned)
434
435 oe.copy_buildsystem.merge_lockedsigs([],
436 sigfile,
437 nativesigfile_pruned,
438 sigfile)
439
440 oe.copy_buildsystem.prune_lockedsigs([],
441 excluded_targets.split(),
442 sigfile,
443 False,
444 lockedsigs_pruned)
445
446 sstate_out = baseoutpath + '/sstate-cache'
447 bb.utils.remove(sstate_out, True)
448
449 # uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d)
450 fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d)
451
452 sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
453 sdk_ext_type = d.getVar('SDK_EXT_TYPE')
454 if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
455 # Create the filtered task list used to generate the sstate cache shipped with the SDK
456 tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
457 create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
458 else:
459 tasklistfn = None
460
461 if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
462 bb.parse.siggen.save_unitaskhashes()
463 bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
464 shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
465
466 # Add packagedata if enabled
467 if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
468 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
469 lockedsigs_copy = d.getVar('WORKDIR') + '/locked-sigs-copy.inc'
470 shutil.move(lockedsigs_pruned, lockedsigs_base)
471 oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
472 lockedsigs_base,
473 d.getVar('STAGING_DIR_HOST') + '/world-pkgdata/locked-sigs-pkgdata.inc',
474 lockedsigs_pruned,
475 lockedsigs_copy)
476
477 if sdk_include_toolchain:
478 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base2.inc'
479 lockedsigs_toolchain = d.expand("${STAGING_DIR}/${TUNE_PKGARCH}/meta-extsdk-toolchain/locked-sigs/locked-sigs-extsdk-toolchain.inc")
480 shutil.move(lockedsigs_pruned, lockedsigs_base)
481 oe.copy_buildsystem.merge_lockedsigs([],
482 lockedsigs_base,
483 lockedsigs_toolchain,
484 lockedsigs_pruned)
485 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain,
486 d.getVar('SSTATE_DIR'),
487 sstate_out, d,
488 fixedlsbstring,
489 filterfile=tasklistfn)
490
491 if sdk_ext_type == 'minimal':
492 if derivative:
493 # Assume the user is not going to set up an additional sstate
494 # mirror, thus we need to copy the additional artifacts (from
495 # workspace recipes) into the derivative SDK
496 lockedsigs_orig = d.getVar('TOPDIR') + '/conf/locked-sigs.inc'
497 if os.path.exists(lockedsigs_orig):
498 lockedsigs_extra = d.getVar('WORKDIR') + '/locked-sigs-extra.inc'
499 oe.copy_buildsystem.merge_lockedsigs(None,
500 lockedsigs_orig,
501 lockedsigs_pruned,
502 None,
503 lockedsigs_extra)
504 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
505 d.getVar('SSTATE_DIR'),
506 sstate_out, d,
507 fixedlsbstring,
508 filterfile=tasklistfn)
509 else:
510 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
511 d.getVar('SSTATE_DIR'),
512 sstate_out, d,
513 fixedlsbstring,
514 filterfile=tasklistfn)
515
516 # We don't need sstate do_package files
517 for root, dirs, files in os.walk(sstate_out):
518 for name in files:
519 if name.endswith("_package.tgz"):
520 f = os.path.join(root, name)
521 os.remove(f)
522
523 # Write manifest file
524 # Note: at the moment we cannot include the env setup script here to keep
525 # it updated, since it gets modified during SDK installation (see
526    # sdk_ext_postinst() below), so the checksum we take here would always
527 # be different.
528 manifest_file_list = ['conf/*']
529 esdk_manifest_excludes = (d.getVar('ESDK_MANIFEST_EXCLUDES') or '').split()
530 esdk_manifest_excludes_list = []
531 for exclude_item in esdk_manifest_excludes:
532 esdk_manifest_excludes_list += glob.glob(os.path.join(baseoutpath, exclude_item))
533 manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
534 with open(manifest_file, 'w') as f:
535 for item in manifest_file_list:
536 for fn in glob.glob(os.path.join(baseoutpath, item)):
537 if fn == manifest_file:
538 continue
539 if fn in esdk_manifest_excludes_list:
540 continue
541 chksum = bb.utils.sha256_file(fn)
542 f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
543}
544
545def get_current_buildtools(d):
546 """Get the file name of the current buildtools installer"""
547 import glob
548 btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY'), '*-buildtools-nativesdk-standalone-*.sh'))
549 btfiles.sort(key=os.path.getctime)
550 return os.path.basename(btfiles[-1])
551
552def get_sdk_required_utilities(buildtools_fn, d):
553 """Find required utilities that aren't provided by the buildtools"""
554 sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
555 sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
556 sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
557 if buildtools_fn:
558 buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
559 filelist, _ = bb.process.run('%s -l' % buildtools_installer)
560 else:
561 buildtools_installer = None
562 filelist = ""
563 localdata = bb.data.createCopy(d)
564 localdata.setVar('SDKPATH', '.')
565 sdkpathnative = localdata.getVar('SDKPATHNATIVE')
566 sdkbindirs = [localdata.getVar('bindir_nativesdk'),
567 localdata.getVar('sbindir_nativesdk'),
568 localdata.getVar('base_bindir_nativesdk'),
569 localdata.getVar('base_sbindir_nativesdk')]
570 for line in filelist.splitlines():
571 splitline = line.split()
572 if len(splitline) > 5:
573 fn = splitline[5]
574 if not fn.startswith('./'):
575 fn = './%s' % fn
576 if fn.startswith(sdkpathnative):
577 relpth = '/' + os.path.relpath(fn, sdkpathnative)
578 for bindir in sdkbindirs:
579 if relpth.startswith(bindir):
580 relpth = os.path.relpath(relpth, bindir)
581 if relpth in sanity_required_utilities:
582 sanity_required_utilities.remove(relpth)
583 break
584 return ' '.join(sanity_required_utilities)
585
586install_tools() {
587 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
588 scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
589 for script in $scripts; do
590 for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
591 targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
592 test -e ${targetscriptfn} || lnr ${scriptfn} ${targetscriptfn}
593 done
594 done
595 # We can't use the same method as above because files in the sysroot won't exist at this point
596 # (they get populated from sstate on installation)
597 unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
598 if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
599 binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
600 lnr ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
601 fi
602 touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
603
604 # find latest buildtools-tarball and install it
605 if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
606 install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
607 fi
608
609 install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
610}
611do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True"
612
613sdk_ext_preinst() {
614    # Since bitbake won't run as root, it doesn't make sense to try to install
615    # the extensible SDK as root.
616 if [ "`id -u`" = "0" ]; then
617 echo "ERROR: The extensible sdk cannot be installed as root."
618 exit 1
619 fi
620 if ! command -v locale > /dev/null; then
621 echo "ERROR: The installer requires the locale command, please install it first"
622 exit 1
623 fi
624 # Check setting of LC_ALL set above
625 canonicalised_locale=`echo $LC_ALL | sed 's/UTF-8/utf8/'`
626 if ! locale -a | grep -q $canonicalised_locale ; then
627 echo "ERROR: the installer requires the $LC_ALL locale to be installed (but not selected), please install it first"
628 exit 1
629 fi
630 # The relocation script used by buildtools installer requires python
631 if ! command -v python3 > /dev/null; then
632 echo "ERROR: The installer requires python3, please install it first"
633 exit 1
634 fi
635 missing_utils=""
636 for util in ${SDK_REQUIRED_UTILITIES}; do
637 if ! command -v $util > /dev/null; then
638 missing_utils="$missing_utils $util"
639 fi
640 done
641 if [ -n "$missing_utils" ] ; then
642 echo "ERROR: the SDK requires the following missing utilities, please install them: $missing_utils"
643 exit 1
644 fi
645 SDK_EXTENSIBLE="1"
646 if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
647 EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
648 fi
649}
650SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
651
652# FIXME this preparation should be done as part of the SDK construction
653sdk_ext_postinst() {
654 printf "\nExtracting buildtools...\n"
655 cd $target_sdk_dir
656 env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
657 if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
658 printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
659
660 # Delete the buildtools tar file since it won't be used again
661 rm -f ./${SDK_BUILDTOOLS_INSTALLER}
662 # We don't need the log either since it succeeded
663 rm -f buildtools.log
664
665 # Make sure when the user sets up the environment, they also get
666 # the buildtools-tarball tools in their path.
667 echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script
668 echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script
669 echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
670 echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script
671 fi
672
673    # Allow the bitbake environment setup to be run as part of this SDK.
674 echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
675 # Work around runqemu not knowing how to get this information within the eSDK
676 echo "export DEPLOY_DIR_IMAGE=$target_sdk_dir/tmp/${@os.path.relpath(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('TMPDIR'))}" >> $env_setup_script
677
678    # Another small hack: we only need this in the path for devtool,
679    # so put it at the end of $PATH.
680 echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
681
682 echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
683
684 # Warn if trying to use external bitbake and the ext SDK together
685 echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
686
687 if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
688 printf "Preparing build system...\n"
689        # dash, which is /bin/sh on Ubuntu, will not preserve the
690        # current working directory when first run, nor will it set $1 when
691 # sourcing a script. That is why this has to look so ugly.
692 LOGFILE="$target_sdk_dir/preparing_build_system.log"
693 sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
694 fi
695 if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
696 rm $target_sdk_dir/ext-sdk-prepare.py
697 fi
698 echo done
699}
700
701SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
702
703SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
704
705SDK_INSTALL_TARGETS = ""
706fakeroot python do_populate_sdk_ext() {
707 # FIXME hopefully we can remove this restriction at some point, but uninative
708 # currently forces this upon us
709 if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
710 bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
711
712 d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
713 if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
714 buildtools_fn = get_current_buildtools(d)
715 else:
716 buildtools_fn = None
717 d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
718 d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
719 d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
720 # ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
721 d.delVar('SDKIMAGE_LINGUAS')
722 if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
723 generate_nativesdk_lockedsigs(d)
724 populate_sdk_common(d)
725}
726
727def generate_nativesdk_lockedsigs(d):
728 import oe.copy_buildsystem
729 sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
730 oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
731
732def get_ext_sdk_depends(d):
733 # Note: the deps varflag is a list not a string, so we need to specify expand=False
734 deps = d.getVarFlag('do_image_complete', 'deps', False)
735 pn = d.getVar('PN')
736 deplist = ['%s:%s' % (pn, dep) for dep in deps]
737 tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
738 tasklist.append('do_rootfs')
739 for task in tasklist:
740 deplist.extend((d.getVarFlag(task, 'depends') or '').split())
741 return ' '.join(deplist)
742
743python do_sdk_depends() {
744 # We have to do this separately in its own task so we avoid recursing into
745 # dependencies we don't need to (e.g. buildtools-tarball) and bringing those
746 # into the SDK's sstate-cache
747 import oe.copy_buildsystem
748 sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
749 oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
750}
751addtask sdk_depends
752
753do_sdk_depends[dirs] = "${WORKDIR}"
754do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
755do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
756do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
757do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
758
759def get_sdk_ext_rdepends(d):
760 localdata = d.createCopy()
761 localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
762 return localdata.getVarFlag('do_populate_sdk', 'rdepends')
763
764do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
765
766do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
767 ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \
768 ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
769 ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
770
771# We must avoid depending on do_build here if rm_work.bbclass is active,
772# because otherwise do_rm_work may run before do_populate_sdk_ext itself.
773# We can't mark do_populate_sdk_ext and do_sdk_depends as having to
774# run before do_rm_work, because then they would also run as part
775# of normal builds.
776do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':' + (d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build') for x in d.getVar('SDK_TARGETS').split()])}"
777
778# Make sure code changes can result in rebuild
779do_populate_sdk_ext[vardeps] += "copy_buildsystem \
780 sdk_ext_postinst"
781
782# Since any change in the metadata of any layer should cause a rebuild of the
783# SDK (since the layers are put into the SDK), set the task to nostamp so it
784# always runs.
785do_populate_sdk_ext[nostamp] = "1"
786
787SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
788
789SSTATETASKS += "do_populate_sdk_ext"
790SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
791do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
792do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
793do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
794do_populate_sdk_ext[stamp-extra-info] = "${MACHINE_ARCH}"
795
796addtask populate_sdk_ext after do_sdk_depends
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
index 6dcf99e29f..e5098e3308 100644
--- a/meta/classes/prexport.bbclass
+++ b/meta/classes/prexport.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
8PRSERV_DUMPOPT_PKGARCH = ""
9PRSERV_DUMPOPT_CHECKSUM = ""
diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass
index 8ed45f03f0..00924174c1 100644
--- a/meta/classes/primport.bbclass
+++ b/meta/classes/primport.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python primport_handler () {
8    import bb.event
9    if not e.data:
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
deleted file mode 100644
index 478a33474d..0000000000
--- a/meta/classes/ptest-gnome.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
1inherit ptest
2
3EXTRA_OECONF_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
4
5FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
6 ${datadir}/installed-tests/"
7
8RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes/ptest-perl.bbclass b/meta/classes/ptest-perl.bbclass
deleted file mode 100644
index a4bc40b51a..0000000000
--- a/meta/classes/ptest-perl.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1inherit ptest
2
3FILESEXTRAPATHS_prepend := "${COREBASE}/meta/files:"
4
5SRC_URI += "file://ptest-perl/run-ptest"
6
7do_install_ptest_perl() {
8 install -d ${D}${PTEST_PATH}
9 if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
10 install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
11 fi
12 cp -r ${B}/t ${D}${PTEST_PATH}
13 chown -R root:root ${D}${PTEST_PATH}
14}
15
16FILES_${PN}-ptest_prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
17
18RDEPENDS_${PN}-ptest_prepend = "perl "
19
20addtask install_ptest_perl after do_install_ptest_base before do_package
21
22python () {
23 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
24 d.setVarFlag('do_install_ptest_perl', 'fakeroot', '1')
25
26 # Remove all '*ptest_perl' tasks when ptest is not enabled
27 if not(d.getVar('PTEST_ENABLED') == "1"):
28 for i in ['do_install_ptest_perl']:
29 bb.build.deltask(i, d)
30}
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
deleted file mode 100644
index 47611edea2..0000000000
--- a/meta/classes/ptest.bbclass
+++ /dev/null
@@ -1,119 +0,0 @@
1SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
2DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
3This package contains a test directory ${PTEST_PATH} for package test purposes."
4
5PTEST_PATH ?= "${libdir}/${BPN}/ptest"
6PTEST_BUILD_HOST_FILES ?= "Makefile"
7PTEST_BUILD_HOST_PATTERN ?= ""
8
9FILES_${PN}-ptest += "${PTEST_PATH}"
10SECTION_${PN}-ptest = "devel"
11ALLOW_EMPTY_${PN}-ptest = "1"
12PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
13PTEST_ENABLED_class-native = ""
14PTEST_ENABLED_class-nativesdk = ""
15PTEST_ENABLED_class-cross-canadian = ""
16RDEPENDS_${PN}-ptest += "${PN}"
17RDEPENDS_${PN}-ptest_class-native = ""
18RDEPENDS_${PN}-ptest_class-nativesdk = ""
19RRECOMMENDS_${PN}-ptest += "ptest-runner"
20
21PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
22
23do_configure_ptest() {
24 :
25}
26
27do_configure_ptest_base() {
28 do_configure_ptest
29}
30
31do_compile_ptest() {
32 :
33}
34
35do_compile_ptest_base() {
36 do_compile_ptest
37}
38
39do_install_ptest() {
40 :
41}
42
43do_install_ptest_base() {
44 if [ -f ${WORKDIR}/run-ptest ]; then
45 install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
46 fi
47 if grep -q install-ptest: Makefile; then
48 oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
49 fi
50 do_install_ptest
51 chown -R root:root ${D}${PTEST_PATH}
52
53 # Strip build host paths from any installed Makefile
54 for filename in ${PTEST_BUILD_HOST_FILES}; do
55 for installed_ptest_file in $(find ${D}${PTEST_PATH} -type f -name $filename); do
56 bbnote "Stripping host paths from: $installed_ptest_file"
57 sed -e 's#${HOSTTOOLS_DIR}/*##g' \
58 -e 's#${WORKDIR}/*=#.=#g' \
59 -e 's#${WORKDIR}/*##g' \
60 -i $installed_ptest_file
61 if [ -n "${PTEST_BUILD_HOST_PATTERN}" ]; then
62 sed -E '/${PTEST_BUILD_HOST_PATTERN}/d' \
63 -i $installed_ptest_file
64 fi
65 done
66 done
67}
68
69PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
70
71# This function needs to run after apply_update_alternative_renames because the
72# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
73# used here to make this function run as late as possible.
74PACKAGE_PREPROCESS_FUNCS_append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
75 bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
76
77python ptest_update_alternatives() {
78 """
79 This function will generate the symlinks in the PTEST_BINDIR_PKGD_PATH
80 to match the renamed binaries by update-alternatives.
81 """
82
83 if not bb.data.inherits_class('update-alternatives', d) \
84 or not update_alternatives_enabled(d):
85 return
86
87 bb.note("Generating symlinks for ptest")
88 bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"),
89 d.getVar("sbindir"), d.getVar("base_sbindir") }
90 ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH")
91 os.mkdir(ptest_bindir)
92 for pkg in (d.getVar('PACKAGES') or "").split():
93 alternatives = update_alternatives_alt_targets(d, pkg)
94 for alt_name, alt_link, alt_target, _ in alternatives:
95 # Some alternatives are for man pages,
96 # check if the alternative is in PATH
97 if os.path.dirname(alt_link) in bin_paths:
98 os.symlink(alt_target, os.path.join(ptest_bindir, alt_name))
99}
100
101do_configure_ptest_base[dirs] = "${B}"
102do_compile_ptest_base[dirs] = "${B}"
103do_install_ptest_base[dirs] = "${B}"
104do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
105
106addtask configure_ptest_base after do_configure before do_compile
107addtask compile_ptest_base after do_compile before do_install
108addtask install_ptest_base after do_install before do_package do_populate_sysroot
109
110python () {
111 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
112 d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
113 d.setVarFlag('do_install_ptest_base', 'umask', '022')
114
115 # Remove all '*ptest_base' tasks when ptest is not enabled
116 if not(d.getVar('PTEST_ENABLED') == "1"):
117 for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
118 bb.build.deltask(i, d)
119}
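# A minimal recipe-side sketch (hypothetical recipe and test path, for
# illustration only): a recipe opting into ptest provides a run-ptest script
# via SRC_URI and installs its tests through the do_install_ptest hook above.
#
#   inherit ptest
#   SRC_URI += "file://run-ptest"
#   do_install_ptest() {
#       install -m 0755 ${S}/tests/selftest ${D}${PTEST_PATH}/
#   }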
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
deleted file mode 100644
index 87b4c85fc0..0000000000
--- a/meta/classes/pypi.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
1def pypi_package(d):
2 bpn = d.getVar('BPN')
3 if bpn.startswith('python-'):
4 return bpn[7:]
5 elif bpn.startswith('python3-'):
6 return bpn[8:]
7 return bpn
8
9PYPI_PACKAGE ?= "${@pypi_package(d)}"
10PYPI_PACKAGE_EXT ?= "tar.gz"
11
12def pypi_src_uri(d):
13 package = d.getVar('PYPI_PACKAGE')
14 package_ext = d.getVar('PYPI_PACKAGE_EXT')
15 pv = d.getVar('PV')
16 return 'https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s' % (package[0], package, package, pv, package_ext)
17
18PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
19
20HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
21SECTION = "devel/python"
22SRC_URI += "${PYPI_SRC_URI}"
23S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
24
25UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
26UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
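Since PYPI_PACKAGE is derived from BPN and PYPI_SRC_URI is built from it, a consuming recipe usually only needs to inherit the class. A hedged sketch (package name is illustrative and the checksum is deliberately left as a placeholder):

    # python3-example_1.0.bb -- illustrative only, not part of this diff.
    SUMMARY = "Example module fetched from PyPI"

    inherit pypi setuptools3

    # PYPI_PACKAGE defaults to "example" (BPN minus the "python3-" prefix), so
    # SRC_URI resolves to .../packages/source/e/example/example-1.0.tar.gz.
    SRC_URI[sha256sum] = "<checksum placeholder>"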
diff --git a/meta/classes/python3-dir.bbclass b/meta/classes/python3-dir.bbclass
deleted file mode 100644
index f51f971fc5..0000000000
--- a/meta/classes/python3-dir.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
1PYTHON_BASEVERSION = "3.9"
2PYTHON_ABI = ""
3PYTHON_DIR = "python${PYTHON_BASEVERSION}"
4PYTHON_PN = "python3"
5PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
deleted file mode 100644
index 2e3a88c126..0000000000
--- a/meta/classes/python3native.bbclass
+++ /dev/null
@@ -1,24 +0,0 @@
1inherit python3-dir
2
3PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
4EXTRANATIVEPATH += "python3-native"
5DEPENDS_append = " python3-native "
6
7# python-config and other scripts are using distutils modules
8# which we patch to access these variables
9export STAGING_INCDIR
10export STAGING_LIBDIR
11
12# Packages can use
13# find_package(PythonInterp REQUIRED)
14# find_package(PythonLibs REQUIRED)
15# which ends up using libs/includes from build host
16# Therefore pre-empt that effort
17export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
18export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
19
20# suppress host user's site-packages dirs.
21export PYTHONNOUSERSITE = "1"
22
23# autoconf macros will use their internal default preference otherwise
24export PYTHON
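A recipe whose build steps must call the build-host Python interpreter would inherit this class so that ${PYTHON} points at python3-native and host site-packages stay out of the environment. A hedged sketch (recipe and script names are placeholders):

    # Hypothetical recipe fragment -- a sketch only.
    inherit python3native

    do_configure_prepend() {
        # Runs under the native interpreter from the sysroot, not the host's.
        ${PYTHON} ${S}/scripts/generate_config.py
    }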
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass
deleted file mode 100644
index fc1025c207..0000000000
--- a/meta/classes/python3targetconfig.bbclass
+++ /dev/null
@@ -1,17 +0,0 @@
1inherit python3native
2
3EXTRA_PYTHON_DEPENDS ?= ""
4EXTRA_PYTHON_DEPENDS_class-target = "python3"
5DEPENDS_append = " ${EXTRA_PYTHON_DEPENDS}"
6
7do_configure_prepend_class-target() {
8 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
9}
10
11do_compile_prepend_class-target() {
12 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
13}
14
15do_install_prepend_class-target() {
16 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
17}
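The class above matters for recipes that build Python extension modules for the target: exporting _PYTHON_SYSCONFIGDATA_NAME makes configure, compile and install pick up the target sysconfigdata rather than the native one. A hedged sketch of the consuming side (recipe name is a placeholder):

    # Hypothetical recipe fragment (python3-example-ext_1.0.bb) -- sketch only.
    # The extension links against the target libpython, so the target
    # sysconfigdata exported above must be in effect for all three steps.
    inherit setuptools3 python3targetconfig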
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
deleted file mode 100644
index 55bdff816b..0000000000
--- a/meta/classes/qemu.bbclass
+++ /dev/null
@@ -1,67 +0,0 @@
1#
2# This class contains functions for recipes that need QEMU or test for its
3# existence.
4#
5
6def qemu_target_binary(data):
7 package_arch = data.getVar("PACKAGE_ARCH")
8 qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch) or "")
9 if qemu_target_binary:
10 return qemu_target_binary
11
12 target_arch = data.getVar("TARGET_ARCH")
13 if target_arch in ("i486", "i586", "i686"):
14 target_arch = "i386"
15 elif target_arch == "powerpc":
16 target_arch = "ppc"
17 elif target_arch == "powerpc64":
18 target_arch = "ppc64"
19 elif target_arch == "powerpc64le":
20 target_arch = "ppc64le"
21
22 return "qemu-" + target_arch
23
24def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
25 import string
26
27 qemu_binary = qemu_target_binary(data)
28 if qemu_binary == "qemu-allarch":
29 qemu_binary = "qemuwrapper"
30
31 qemu_options = data.getVar("QEMU_OPTIONS")
32
33 return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
34 + " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
35
36# The next function returns a string containing the command needed to run a
37# certain binary through qemu. For example, when a postinstall scriptlet has to
38# run at do_rootfs time and running the postinstall is architecture dependent,
39# we can run it through qemu. In the postinstall scriptlet, we could use the
40# following:
41#
42# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
43#
44def qemu_run_binary(data, rootfs_path, binary):
45 libdir = rootfs_path + data.getVar("libdir", False)
46 base_libdir = rootfs_path + data.getVar("base_libdir", False)
47
48 return qemu_wrapper_cmdline(data, rootfs_path, [libdir, base_libdir]) + rootfs_path + binary
49
50# QEMU_EXTRAOPTIONS is not meant to be used directly; the extensions are
51# PACKAGE_ARCH values, *NOT* overrides.
52# In some cases (e.g. ppc) simply being arch specific (apparently) isn't good
53# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do
54# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
55# qemu-arm default CPU supports all required architecture levels.
56
57QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')) or ""}"
58QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
59
60QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
61QEMU_EXTRAOPTIONS_ppce500mc = " -cpu e500mc"
62QEMU_EXTRAOPTIONS_ppce5500 = " -cpu e500mc"
63QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
64QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
65QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
66QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
67QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER8"
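Spelling out the usage pattern from the comment above as a hedged sketch of a postinstall scriptlet (the binary name and its argument are placeholders):

    # Hypothetical recipe fragment -- a sketch only.
    pkg_postinst_${PN}() {
        if [ -n "$D" ]; then
            # At do_rootfs time, run the target binary under qemu user mode.
            ${@qemu_run_binary(d, '$D', '/usr/bin/example-setup')} --init-db
        fi
    }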
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
deleted file mode 100644
index 4b7532b304..0000000000
--- a/meta/classes/qemuboot.bbclass
+++ /dev/null
@@ -1,148 +0,0 @@
1# Help runqemu boot the target board. "QB" means Qemu Boot. The following
2# vars can be set in conf files, such as <bsp.conf>, so that the board can be
3# booted by runqemu:
4#
5# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
6#
7# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
8#
9# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
10#
11# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
12#
13# QB_MEM: memory, e.g., "-m 512"
14#
15# QB_MACHINE: qemu machine, e.g., "-machine virt"
16#
17# QB_CPU: qemu cpu, e.g., "-cpu qemu32"
18#
19# QB_CPU_KVM: similar to QB_CPU, but used when KVM is enabled, e.g., '-cpu kvm64',
20# set it when KVM is supported.
21#
22# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
23# option, e.g., "console=ttyS0 console=tty"
24#
25# QB_DTB: qemu dtb name
26#
27# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa", set it when audio is supported
28#
29# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
30# when QB_AUDIO_DRV is set.
31#
32# QB_RNG: Pass-through for the host random number generator; it can speed up boot
33# in system mode when the system is experiencing entropy starvation
34#
35# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
36#
37# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
38# it needs to work with QB_TAP_OPT and QB_SLIRP_OPT.
39# Note, runqemu will replace @MAC@ with a predefined mac, you can set
40# a custom one, but that may cause conflicts when multiple qemus are
41# running on the same host.
42# Note: If more than one interface of type -device virtio-net-device gets added,
43# QB_NETWORK_DEVICE_prepend might be used, since Qemu enumerates the eth*
44# devices in reverse order to -device arguments.
45#
46# QB_TAP_OPT: network option for 'tap' mode, e.g.,
47# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
48# Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ...
49#
50# QB_SLIRP_OPT: network option for SLIRP mode, e.g., "-netdev user,id=net0"
51#
52# QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the
53# ip= kernel command line argument needs to be changed accordingly. Details are documented
54# in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
55# Example to configure only the first interface: "ip=eth0:dhcp"
56# QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires
57# static IP configuration, the @CLIENT@ and @GATEWAY@ placeholders are replaced by the IP and the gateway
58# address of the qemu guest by runqemu.
59# Example: "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0"
60#
61# QB_ROOTFS_OPT: used as rootfs, e.g.,
62# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
63# Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4.
64#
65# QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio"
66#
67# QB_TCPSERIAL_OPT: tcp serial port option, e.g.,
68# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
69# Note, runqemu will replace "@PORT@" with the port number which is used.
70#
71# QB_ROOTFS_EXTRA_OPT: extra options to be appended to the rootfs device in case there is none specified by QB_ROOTFS_OPT.
72# Can be used to automatically determine the image from the other variables
73# but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs
74# without the need to specify a dedicated qemu configuration
75# Usage:
76# IMAGE_CLASSES += "qemuboot"
77# See "runqemu help" for more info
78
79QB_MEM ?= "-m 256"
80QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
81QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
82QB_DEFAULT_FSTYPE ?= "ext4"
83QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
84QB_OPT_APPEND ?= ""
85QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
86QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
87QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
88QB_ROOTFS_EXTRA_OPT ?= ""
89
90# This should be kept aligned with ROOT_VM
91QB_DRIVE_TYPE ?= "/dev/sd"
92
93inherit image-artifact-names
94
95# Create qemuboot.conf
96addtask do_write_qemuboot_conf after do_rootfs before do_image
97
98def qemuboot_vars(d):
99 build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
100 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
101 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
102 'STAGING_DIR_HOST']
103 return build_vars + [k for k in d.keys() if k.startswith('QB_')]
104
105do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
106do_write_qemuboot_conf[vardepsexclude] += "TOPDIR"
107python do_write_qemuboot_conf() {
108 import configparser
109
110 qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
111 qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
112 finalpath = d.getVar("DEPLOY_DIR_IMAGE")
113 topdir = d.getVar('TOPDIR')
114 cf = configparser.ConfigParser()
115 cf.add_section('config_bsp')
116 for k in sorted(qemuboot_vars(d)):
117 # qemu-helper-native sysroot is not removed by rm_work and
118 # contains all tools required by runqemu
119 if k == 'STAGING_BINDIR_NATIVE':
120 val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'),
121 'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
122 else:
123 val = d.getVar(k)
124 # we only want to write out relative paths so that we can relocate images
125 # and still run them
126 if val.startswith(topdir):
127 val = os.path.relpath(val, finalpath)
128 cf.set('config_bsp', k, '%s' % val)
129
130 # QB_DEFAULT_KERNEL's value of KERNEL_IMAGETYPE is the name of a symlink
131 # to the kernel file, which hinders relocatability of the qb conf.
132 # Read the link and replace it with the full filename of the target.
133 kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('QB_DEFAULT_KERNEL'))
134 kernel = os.path.realpath(kernel_link)
135 # we only want to write out relative paths so that we can relocate images
136 # and still run them
137 kernel = os.path.relpath(kernel, finalpath)
138 cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
139
140 bb.utils.mkdirhier(os.path.dirname(qemuboot))
141 with open(qemuboot, 'w') as f:
142 cf.write(f)
143
144 if qemuboot_link != qemuboot:
145 if os.path.lexists(qemuboot_link):
146 os.remove(qemuboot_link)
147 os.symlink(os.path.basename(qemuboot), qemuboot_link)
148}
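To make the QB_* documentation above concrete, a hedged sketch of what a BSP's machine configuration might set (all values are illustrative, not taken from a real board):

    # Illustrative <machine>.conf fragment for runqemu -- a sketch only.
    IMAGE_CLASSES += "qemuboot"

    QB_SYSTEM_NAME = "qemu-system-aarch64"
    QB_MACHINE = "-machine virt"
    QB_CPU = "-cpu cortex-a57"
    QB_MEM = "-m 512"
    QB_DEFAULT_KERNEL = "Image"
    QB_DEFAULT_FSTYPE = "ext4"
    QB_KERNEL_CMDLINE_APPEND = "console=ttyAMA0"
    QB_SERIAL_OPT = "-serial mon:stdio"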
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
index 7fa4a849ea..a5cc4315fb 100644
--- a/meta/classes/recipe_sanity.bbclass
+++ b/meta/classes/recipe_sanity.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1def __note(msg, d): 7def __note(msg, d):
2 bb.note("%s: recipe_sanity: %s" % (d.getVar("P"), msg)) 8 bb.note("%s: recipe_sanity: %s" % (d.getVar("P"), msg))
3 9
@@ -10,7 +16,7 @@ def bad_runtime_vars(cfgdata, d):
10 for var in d.getVar("__recipe_sanity_badruntimevars").split(): 16 for var in d.getVar("__recipe_sanity_badruntimevars").split():
11 val = d.getVar(var, False) 17 val = d.getVar(var, False)
12 if val and val != cfgdata.get(var): 18 if val and val != cfgdata.get(var):
13 __note("%s should be %s_${PN}" % (var, var), d) 19 __note("%s should be %s:${PN}" % (var, var), d)
14 20
15__recipe_sanity_reqvars = "DESCRIPTION" 21__recipe_sanity_reqvars = "DESCRIPTION"
16__recipe_sanity_reqdiffvars = "" 22__recipe_sanity_reqdiffvars = ""
diff --git a/meta/classes/relative_symlinks.bbclass b/meta/classes/relative_symlinks.bbclass
index 3157737347..9ee20e0d09 100644
--- a/meta/classes/relative_symlinks.bbclass
+++ b/meta/classes/relative_symlinks.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1do_install[postfuncs] += "install_relative_symlinks" 7do_install[postfuncs] += "install_relative_symlinks"
2 8
3python install_relative_symlinks () { 9python install_relative_symlinks () {
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
index af04be5cca..d0a623fb0a 100644
--- a/meta/classes/relocatable.bbclass
+++ b/meta/classes/relocatable.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1inherit chrpath 7inherit chrpath
2 8
3SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess relocatable_native_pcfiles" 9SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess relocatable_native_pcfiles"
diff --git a/meta/classes/remove-libtool.bbclass b/meta/classes/remove-libtool.bbclass
index 3fd0cd58f9..8e987388c8 100644
--- a/meta/classes/remove-libtool.bbclass
+++ b/meta/classes/remove-libtool.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# This class removes libtool .la files after do_install 7# This class removes libtool .la files after do_install
2 8
3REMOVE_LIBTOOL_LA ?= "1" 9REMOVE_LIBTOOL_LA ?= "1"
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 9cb6b0bd31..1452513a66 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -4,9 +4,8 @@
4# Copyright (C) 2013 Intel Corporation 4# Copyright (C) 2013 Intel Corporation
5# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com> 5# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com>
6# 6#
7# Licensed under the MIT license, see COPYING.MIT for details 7# SPDX-License-Identifier: MIT
8 8#
9inherit base
10 9
11ERR_REPORT_DIR ?= "${LOG_DIR}/error-report" 10ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
12 11
@@ -40,6 +39,19 @@ def get_conf_data(e, filename):
40 jsonstring=jsonstring + line 39 jsonstring=jsonstring + line
41 return jsonstring 40 return jsonstring
42 41
42def get_common_data(e):
43 data = {}
44 data['machine'] = e.data.getVar("MACHINE")
45 data['build_sys'] = e.data.getVar("BUILD_SYS")
46 data['distro'] = e.data.getVar("DISTRO")
47 data['target_sys'] = e.data.getVar("TARGET_SYS")
48 data['branch_commit'] = str(oe.buildcfg.detect_branch(e.data)) + ": " + str(oe.buildcfg.detect_revision(e.data))
49 data['bitbake_version'] = e.data.getVar("BB_VERSION")
50 data['layer_version'] = get_layers_branch_rev(e.data)
51 data['local_conf'] = get_conf_data(e, 'local.conf')
52 data['auto_conf'] = get_conf_data(e, 'auto.conf')
53 return data
54
43python errorreport_handler () { 55python errorreport_handler () {
44 import json 56 import json
45 import codecs 57 import codecs
@@ -57,19 +69,10 @@ python errorreport_handler () {
57 if isinstance(e, bb.event.BuildStarted): 69 if isinstance(e, bb.event.BuildStarted):
58 bb.utils.mkdirhier(logpath) 70 bb.utils.mkdirhier(logpath)
59 data = {} 71 data = {}
60 machine = e.data.getVar("MACHINE") 72 data = get_common_data(e)
61 data['machine'] = machine
62 data['build_sys'] = e.data.getVar("BUILD_SYS")
63 data['nativelsb'] = nativelsb() 73 data['nativelsb'] = nativelsb()
64 data['distro'] = e.data.getVar("DISTRO")
65 data['target_sys'] = e.data.getVar("TARGET_SYS")
66 data['failures'] = [] 74 data['failures'] = []
67 data['component'] = " ".join(e.getPkgs()) 75 data['component'] = " ".join(e.getPkgs())
68 data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
69 data['bitbake_version'] = e.data.getVar("BB_VERSION")
70 data['layer_version'] = get_layers_branch_rev(e.data)
71 data['local_conf'] = get_conf_data(e, 'local.conf')
72 data['auto_conf'] = get_conf_data(e, 'auto.conf')
73 lock = bb.utils.lockfile(datafile + '.lock') 76 lock = bb.utils.lockfile(datafile + '.lock')
74 errorreport_savedata(e, data, "error-report.txt") 77 errorreport_savedata(e, data, "error-report.txt")
75 bb.utils.unlockfile(lock) 78 bb.utils.unlockfile(lock)
@@ -108,6 +111,37 @@ python errorreport_handler () {
108 errorreport_savedata(e, jsondata, "error-report.txt") 111 errorreport_savedata(e, jsondata, "error-report.txt")
109 bb.utils.unlockfile(lock) 112 bb.utils.unlockfile(lock)
110 113
114 elif isinstance(e, bb.event.NoProvider):
115 bb.utils.mkdirhier(logpath)
116 data = {}
117 data = get_common_data(e)
118 data['nativelsb'] = nativelsb()
119 data['failures'] = []
120 data['component'] = str(e._item)
121 taskdata={}
122 taskdata['log'] = str(e)
123 taskdata['package'] = str(e._item)
124 taskdata['task'] = "Nothing provides " + "'" + str(e._item) + "'"
125 data['failures'].append(taskdata)
126 lock = bb.utils.lockfile(datafile + '.lock')
127 errorreport_savedata(e, data, "error-report.txt")
128 bb.utils.unlockfile(lock)
129
130 elif isinstance(e, bb.event.ParseError):
131 bb.utils.mkdirhier(logpath)
132 data = {}
133 data = get_common_data(e)
134 data['nativelsb'] = nativelsb()
135 data['failures'] = []
136 data['component'] = "parse"
137 taskdata={}
138 taskdata['log'] = str(e._msg)
139 taskdata['task'] = str(e._msg)
140 data['failures'].append(taskdata)
141 lock = bb.utils.lockfile(datafile + '.lock')
142 errorreport_savedata(e, data, "error-report.txt")
143 bb.utils.unlockfile(lock)
144
111 elif isinstance(e, bb.event.BuildCompleted): 145 elif isinstance(e, bb.event.BuildCompleted):
112 lock = bb.utils.lockfile(datafile + '.lock') 146 lock = bb.utils.lockfile(datafile + '.lock')
113 jsondata = json.loads(errorreport_getdata(e)) 147 jsondata = json.loads(errorreport_getdata(e))
@@ -121,4 +155,4 @@ python errorreport_handler () {
121} 155}
122 156
123addhandler errorreport_handler 157addhandler errorreport_handler
124errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed" 158errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed bb.event.NoProvider bb.event.ParseError"
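The handler above is only registered when the class is inherited globally; a hedged local.conf sketch (the custom directory is illustrative):

    # local.conf fragment -- a sketch only.
    INHERIT += "report-error"
    # Optional: move the reports away from the default ${LOG_DIR}/error-report.
    ERR_REPORT_DIR = "${TOPDIR}/error-reports"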
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
deleted file mode 100644
index f06e00d70d..0000000000
--- a/meta/classes/reproducible_build.bbclass
+++ /dev/null
@@ -1,125 +0,0 @@
1# reproducible_build.bbclass
2#
3# Sets SOURCE_DATE_EPOCH in each component's build environment.
4# Upstream components (generally) respect this environment variable,
5# using it in place of the "current" date and time.
6# See https://reproducible-builds.org/specs/source-date-epoch/
7#
8# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
9# This value should be reproducible for anyone who builds the same revision from the same sources.
10#
11# There are 5 ways we determine SOURCE_DATE_EPOCH:
12#
13# 1. Use the value from __source_date_epoch.txt file if this file exists.
14# This file was most likely created in the previous build by one of the following methods 2,3,4.
15# Alternatively, it can be provided by a recipe via SRC_URI.
16#
17# If the file does not exist:
18#
19# 2. If there is a git checkout, use the last git commit timestamp.
20# Git does not preserve file timestamps on checkout.
21#
22# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
23# This works for well-kept repositories distributed via tarball.
24#
25# 4. Use the modification time of the youngest file in the source tree, if there is one.
26# This will be the newest file from the distribution tarball, if any.
27#
28# 5. Fall back to a fixed timestamp.
29#
30# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
31# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
32# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
33#
34# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
35# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
36
37BUILD_REPRODUCIBLE_BINARIES ??= '1'
38inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
39
40SDE_DIR = "${WORKDIR}/source-date-epoch"
41SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
42SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
43
44# A SOURCE_DATE_EPOCH of '0' might be misinterpreted as no SDE
45export SOURCE_DATE_EPOCH_FALLBACK ??= "1302044400"
46
47SSTATETASKS += "do_deploy_source_date_epoch"
48
49do_deploy_source_date_epoch () {
50 mkdir -p ${SDE_DEPLOYDIR}
51 if [ -e ${SDE_FILE} ]; then
52 echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
53 cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
54 else
55 echo "${SDE_FILE} not found!"
56 fi
57}
58
59python do_deploy_source_date_epoch_setscene () {
60 sstate_setscene(d)
61 bb.utils.mkdirhier(d.getVar('SDE_DIR'))
62 sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
63 if os.path.exists(sde_file):
64 target = d.getVar('SDE_FILE')
65 bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
66 os.rename(sde_file, target)
67 else:
68 bb.debug(1, "%s not found!" % sde_file)
69}
70
71do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
72do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
73addtask do_deploy_source_date_epoch_setscene
74addtask do_deploy_source_date_epoch before do_configure after do_patch
75
76python create_source_date_epoch_stamp() {
77 import oe.reproducible
78
79 epochfile = d.getVar('SDE_FILE')
80 # If it exists we need to regenerate as the sources may have changed
81 if os.path.isfile(epochfile):
82 bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile)
83 os.remove(epochfile)
84
85 source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
86
87 bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
88 bb.utils.mkdirhier(d.getVar('SDE_DIR'))
89 with open(epochfile, 'w') as f:
90 f.write(str(source_date_epoch))
91}
92
93def get_source_date_epoch_value(d):
94 cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
95 if cached:
96 return cached
97
98 epochfile = d.getVar('SDE_FILE')
99 source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
100 if os.path.isfile(epochfile):
101 with open(epochfile, 'r') as f:
102 s = f.read()
103 try:
104 source_date_epoch = int(s)
105 # workaround for old sstate with SDE_FILE content being 0 - use SOURCE_DATE_EPOCH_FALLBACK
106 if source_date_epoch == 0 :
107 source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
108 bb.warn("SOURCE_DATE_EPOCH value from sstate '%s' is deprecated/invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK '%s'" % (s, source_date_epoch))
109 except ValueError:
110 bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
111 source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
112 bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
113 else:
114 bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
115
116 d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
117 return str(source_date_epoch)
118
119export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
120BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
121
122python () {
123 if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
124 d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp")
125}
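As the header comment suggests, a recipe with unsuitable sources can replace the default epoch heuristics. One hedged sketch writes a project-chosen timestamp (the value is purely illustrative) into SDE_FILE and deploys it itself:

    # Hypothetical recipe fragment -- a sketch only.
    do_deploy_source_date_epoch () {
        mkdir -p ${SDE_DIR} ${SDE_DEPLOYDIR}
        # Use a timestamp recorded by the project release process instead of
        # the heuristics in create_source_date_epoch_stamp.
        echo "1613169600" > ${SDE_FILE}
        cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
    }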
diff --git a/meta/classes/reproducible_build_simple.bbclass b/meta/classes/reproducible_build_simple.bbclass
deleted file mode 100644
index 393372993d..0000000000
--- a/meta/classes/reproducible_build_simple.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
1# Setup default environment for reproducible builds.
2
3BUILD_REPRODUCIBLE_BINARIES = "1"
4
5export PYTHONHASHSEED = "0"
6export PERL_HASH_SEED = "0"
7export SOURCE_DATE_EPOCH ??= "1520598896"
8
9REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 01c2ab1c78..52ecfafb72 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -1,4 +1,10 @@
1# 1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
2# Removes source after build 8# Removes source after build
3# 9#
4# To use it add that line to conf/local.conf: 10# To use it add that line to conf/local.conf:
@@ -13,7 +19,7 @@
13# Recipes can also configure which entries in their ${WORKDIR} 19# Recipes can also configure which entries in their ${WORKDIR}
14# are preserved besides temp, which already gets excluded by default 20# are preserved besides temp, which already gets excluded by default
15# because it contains logs: 21# because it contains logs:
16# do_install_append () { 22# do_install:append () {
17# echo "bar" >${WORKDIR}/foo 23# echo "bar" >${WORKDIR}/foo
18# } 24# }
19# RM_WORK_EXCLUDE_ITEMS += "foo" 25# RM_WORK_EXCLUDE_ITEMS += "foo"
@@ -24,9 +30,16 @@ RM_WORK_EXCLUDE_ITEMS = "temp"
24BB_SCHEDULER ?= "completion" 30BB_SCHEDULER ?= "completion"
25 31
26# Run the rm_work task in the idle scheduling class 32# Run the rm_work task in the idle scheduling class
27BB_TASK_IONICE_LEVEL_task-rm_work = "3.0" 33BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
28 34
29do_rm_work () { 35do_rm_work () {
36 # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
37 # Avoids race-condition accessing 'rm' when deleting WORKDIR folders at the end of this function
38 RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
39 if [ -z "${RM_BIN}" ]; then
40 bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
41 fi
42
30 # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe. 43 # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
31 for p in ${RM_WORK_EXCLUDE}; do 44 for p in ${RM_WORK_EXCLUDE}; do
32 if [ "$p" = "${PN}" ]; then 45 if [ "$p" = "${PN}" ]; then
@@ -44,55 +57,58 @@ do_rm_work () {
44 # Change normal stamps into setscene stamps as they better reflect the 57 # Change normal stamps into setscene stamps as they better reflect the
45 # fact WORKDIR is now empty 58 # fact WORKDIR is now empty
46 # Also leave noexec stamps since setscene stamps don't cover them 59 # Also leave noexec stamps since setscene stamps don't cover them
47 cd `dirname ${STAMP}` 60 STAMPDIR=`dirname ${STAMP}`
48 for i in `basename ${STAMP}`* 61 if test -d $STAMPDIR; then
49 do 62 cd $STAMPDIR
50 case $i in 63 for i in `basename ${STAMP}`*
51 *sigdata*|*sigbasedata*) 64 do
52 # Save/skip anything that looks like a signature data file. 65 case $i in
53 ;; 66 *sigdata*|*sigbasedata*)
54 *do_image_complete_setscene*|*do_image_qa_setscene*) 67 # Save/skip anything that looks like a signature data file.
55 # Ensure we don't 'stack' setscene extensions to these stamps with the sections below 68 ;;
56 ;; 69 *do_image_complete_setscene*|*do_image_qa_setscene*)
57 *do_image_complete*) 70 # Ensure we don't 'stack' setscene extensions to these stamps with the sections below
58 # Promote do_image_complete stamps to setscene versions (ahead of *do_image* below) 71 ;;
59 mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"` 72 *do_image_complete*)
60 ;; 73 # Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
61 *do_image_qa*) 74 mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
62 # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below) 75 ;;
63 mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"` 76 *do_image_qa*)
64 ;; 77 # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
65 *do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*) 78 mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
66 ;; 79 ;;
67 *do_addto_recipe_sysroot*) 80 *do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
68 # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used 81 ;;
69 excludes="$excludes recipe-sysroot-native" 82 *do_addto_recipe_sysroot*)
70 ;; 83 # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
71 *do_package|*do_package.*|*do_package_setscene.*) 84 excludes="$excludes recipe-sysroot-native"
72 # We remove do_package entirely, including any 85 ;;
73 # sstate version since otherwise we'd need to leave 'plaindirs' around 86 *do_package|*do_package.*|*do_package_setscene.*)
74 # such as 'packages' and 'packages-split' and these can be large. No end 87 # We remove do_package entirely, including any
75 # of chain tasks depend directly on do_package anymore. 88 # sstate version since otherwise we'd need to leave 'plaindirs' around
76 rm -f $i; 89 # such as 'packages' and 'packages-split' and these can be large. No end
77 ;; 90 # of chain tasks depend directly on do_package anymore.
78 *_setscene*) 91 "${RM_BIN}" -f -- $i;
79 # Skip stamps which are already setscene versions 92 ;;
80 ;; 93 *_setscene*)
81 *) 94 # Skip stamps which are already setscene versions
82 # For everything else: if suitable, promote the stamp to a setscene 95 ;;
83 # version, otherwise remove it 96 *)
84 for j in ${SSTATETASKS} do_shared_workdir 97 # For everything else: if suitable, promote the stamp to a setscene
85 do 98 # version, otherwise remove it
86 case $i in 99 for j in ${SSTATETASKS} do_shared_workdir
87 *$j|*$j.*) 100 do
88 mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"` 101 case $i in
89 break 102 *$j|*$j.*)
90 ;; 103 mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
91 esac 104 break
92 done 105 ;;
93 rm -f $i 106 esac
94 esac 107 done
95 done 108 "${RM_BIN}" -f -- $i
109 esac
110 done
111 fi
96 112
97 cd ${WORKDIR} 113 cd ${WORKDIR}
98 for dir in * 114 for dir in *
@@ -100,12 +116,14 @@ do_rm_work () {
100 # Retain only logs and other files in temp, safely ignore 116 # Retain only logs and other files in temp, safely ignore
101 # failures of removing pseudo folders on NFS2/3 server. 117 # failures of removing pseudo folders on NFS2/3 server.
102 if [ $dir = 'pseudo' ]; then 118 if [ $dir = 'pseudo' ]; then
103 rm -rf $dir 2> /dev/null || true 119 "${RM_BIN}" -rf -- $dir 2> /dev/null || true
104 elif ! echo "$excludes" | grep -q -w "$dir"; then 120 elif ! echo "$excludes" | grep -q -w "$dir"; then
105 rm -rf $dir 121 "${RM_BIN}" -rf -- $dir
106 fi 122 fi
107 done 123 done
108} 124}
125do_rm_work[vardepsexclude] += "SSTATETASKS"
126
109do_rm_work_all () { 127do_rm_work_all () {
110 : 128 :
111} 129}
@@ -172,7 +190,7 @@ python inject_rm_work() {
172 # other recipes and thus will typically run much later than completion of 190 # other recipes and thus will typically run much later than completion of
173 # work in the recipe itself. 191 # work in the recipe itself.
174 # In practice, addtask() here merely updates the dependencies. 192 # In practice, addtask() here merely updates the dependencies.
175 bb.build.addtask('do_rm_work', 'do_build', ' '.join(deps), d) 193 bb.build.addtask('do_rm_work', 'do_rm_work_all do_build', ' '.join(deps), d)
176 194
177 # Always update do_build_without_rm_work dependencies. 195 # Always update do_build_without_rm_work dependencies.
178 bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d) 196 bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
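A hedged local.conf sketch showing how the exclusion hooks referenced above are typically used (the recipe names are placeholders):

    # local.conf fragment -- a sketch only.
    INHERIT += "rm_work"
    # Keep the full WORKDIR of recipes currently being debugged.
    RM_WORK_EXCLUDE += "busybox linux-yocto"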
diff --git a/meta/classes/rm_work_and_downloads.bbclass b/meta/classes/rm_work_and_downloads.bbclass
index 7c00bea597..2695a3807f 100644
--- a/meta/classes/rm_work_and_downloads.bbclass
+++ b/meta/classes/rm_work_and_downloads.bbclass
@@ -1,8 +1,7 @@
1# Author: Patrick Ohly <patrick.ohly@intel.com> 1# Author: Patrick Ohly <patrick.ohly@intel.com>
2# Copyright: Copyright (C) 2015 Intel Corporation 2# Copyright: Copyright (C) 2015 Intel Corporation
3# 3#
4# This file is licensed under the MIT license, see COPYING.MIT in 4# SPDX-License-Identifier: MIT
5# this source distribution for the terms.
6 5
7# This class is used like rm_work: 6# This class is used like rm_work:
8# INHERIT += "rm_work_and_downloads" 7# INHERIT += "rm_work_and_downloads"
@@ -28,6 +27,6 @@ inherit rm_work
28 27
29# Instead go up one level and remove ourself. 28# Instead go up one level and remove ourself.
30DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads" 29DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
31do_rm_work_append () { 30do_rm_work:append () {
32 rm -rf ${DL_DIR} 31 rm -rf ${DL_DIR}
33} 32}
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
deleted file mode 100644
index 1f27a3d07a..0000000000
--- a/meta/classes/rootfs-postcommands.bbclass
+++ /dev/null
@@ -1,375 +0,0 @@
1
2# Zap the root password if debug-tweaks feature is not enabled
3ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
4
5# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
6ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
7
8# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled
9ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login; ", "",d)}'
10
11# Enable postinst logging if debug-tweaks is enabled
12ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
13
14# Create /etc/timestamp during image construction to give a reasonably sane default time setting
15ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
16
17# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
18ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
19
20# We also need to do the same for the kernel boot parameters,
21# otherwise kernel or initramfs end up mounting the rootfs read/write
22# (the default) if supported by the underlying storage.
23#
24# We do this with _append because the default value might get set later with ?=
25# and we don't want to disable such a default by setting a value here.
26APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
27
28# Generates test data file with data store variables expanded in json format
29ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
30
31# Write manifest
32IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
33ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
34# Set default postinst log file
35POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
36# Set default target for systemd images
37SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
38ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
39
40ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
41
42inherit image-artifact-names
43
44# Sort the user and group entries in /etc by ID in order to make the content
45# deterministic. Package installs are not deterministic, causing the ordering
46# of entries to change between builds. If this isn't desired,
47# the command can be overridden.
48#
49# Note that useradd-staticids.bbclass has to be used to ensure that
50# the numeric IDs of dynamically created entries remain stable.
51#
52# We want this to run as late as possible, in particular after
53# systemd_sysusers_create and set_user_group. Using _append is not
54# enough for that, as set_user_group is added that way and would end
55# up running after us.
56SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
57python () {
58 d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
59 d.appendVar('ROOTFS_POSTPROCESS_COMMAND', 'rootfs_reproducible;')
60}
61
62systemd_create_users () {
63 for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
64 [ -e $conffile ] || continue
65 grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
66 if [ "$type" = "u" ]; then
67 useradd_params="--shell /sbin/nologin"
68 [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
69 [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
70 useradd_params="$useradd_params --system $name"
71 eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
72 elif [ "$type" = "g" ]; then
73 groupadd_params=""
74 [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
75 groupadd_params="$groupadd_params --system $name"
76 eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
77 elif [ "$type" = "m" ]; then
78 group=$id
79 if [ ! `grep -q "^${group}:" ${IMAGE_ROOTFS}${sysconfdir}/group` ]; then
80 eval groupadd --root ${IMAGE_ROOTFS} --system $group
81 fi
82 if [ ! `grep -q "^${name}:" ${IMAGE_ROOTFS}${sysconfdir}/passwd` ]; then
83 eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name
84 fi
85 eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
86 fi
87 done
88 done
89}
90
91#
92# A hook function to support read-only-rootfs IMAGE_FEATURES
93#
94read_only_rootfs_hook () {
95 # Tweak the mount option and fs_passno for rootfs in fstab
96 if [ -f ${IMAGE_ROOTFS}/etc/fstab ]; then
97 sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
98 fi
99
100 # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab
101 if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then
102 sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab
103 fi
104
105 # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
106 # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
107 # and the keys under /var/run/ssh.
108 if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
109 if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
110 echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
111 echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
112 else
113 echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
114 echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
115 fi
116 fi
117
118 # Also tweak the key location for dropbear in the same way.
119 if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
120 if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
121 echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
122 fi
123 fi
124
125 if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
126 # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
127 if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
128 sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
129 fi
130 # Run populate-volatile.sh at rootfs time to set up basic files
131 # and directories to support read-only rootfs.
132 if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
133 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
134 fi
135 fi
136
137 if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
138 # Create machine-id
139 # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
140 touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
141 fi
142}
143
144#
145# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES.
146#
147zap_empty_root_password () {
148 if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
149 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
150 fi
151 if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
152 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
153 fi
154}
155
156#
157# allow dropbear/openssh to accept logins from accounts with an empty password string
158#
159ssh_allow_empty_password () {
160 for config in sshd_config sshd_config_readonly; do
161 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
162 sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
163 fi
164 done
165
166 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
167 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
168 if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
169 sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
170 fi
171 else
172 printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
173 fi
174 fi
175
176 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
177 for f in `find ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* -type f -exec test -e {} \; -print`
178 do
179 sed -i 's/nullok_secure/nullok/' $f
180 done
181 fi
182}
183
184#
185# allow dropbear/openssh to accept root logins
186#
187ssh_allow_root_login () {
188 for config in sshd_config sshd_config_readonly; do
189 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
190 sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
191 fi
192 done
193
194 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
195 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
196 sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
197 fi
198 fi
199}
200
201python sort_passwd () {
202 import rootfspostcommands
203 rootfspostcommands.sort_passwd(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
204}
205
206#
207# Enable postinst logging if debug-tweaks is enabled
208#
209postinst_enable_logging () {
210 mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
211 echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
212 echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
213}
214
215#
216# Modify systemd default target
217#
218set_systemd_default_target () {
219 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
220 ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
221 fi
222}
223
224# If /var/volatile is not empty, we have seen problems where programs such as the
225# journal make assumptions based on the contents of /var/volatile. The journal
226# would then write to /var/volatile before it was mounted, thus hiding the
227# items previously written.
228#
229# This change is to attempt to fix those types of issues in a way that doesn't
230# affect users that may not be using /var/volatile.
231empty_var_volatile () {
232 if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
233 match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
234 if [ -n "$match" ]; then
235 find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
236 fi
237 fi
238}
239
240# Turn any symbolic /sbin/init link into a file
241remove_init_link () {
242 if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
243 LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
244 rm ${IMAGE_ROOTFS}/sbin/init
245 cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
246 fi
247}
248
249make_zimage_symlink_relative () {
250 if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
251 (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
252 fi
253}
254
255python write_image_manifest () {
256 from oe.rootfs import image_list_installed_packages
257 from oe.utils import format_pkg_list
258
259 deploy_dir = d.getVar('IMGDEPLOYDIR')
260 link_name = d.getVar('IMAGE_LINK_NAME')
261 manifest_name = d.getVar('IMAGE_MANIFEST')
262
263 if not manifest_name:
264 return
265
266 pkgs = image_list_installed_packages(d)
267 with open(manifest_name, 'w+') as image_manifest:
268 image_manifest.write(format_pkg_list(pkgs, "ver"))
269
270 if os.path.exists(manifest_name) and link_name:
271 manifest_link = deploy_dir + "/" + link_name + ".manifest"
272 if os.path.lexists(manifest_link):
273 os.remove(manifest_link)
274 os.symlink(os.path.basename(manifest_name), manifest_link)
275}
276
277# Can be used to create /etc/timestamp during image construction to give a reasonably
278# sane default time setting
279rootfs_update_timestamp () {
280 if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
281 # Convert UTC into %4Y%2m%2d%2H%2M%2S
282 sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
283 else
284 sformatted=`date -u +%4Y%2m%2d%2H%2M%2S`
285 fi
286 echo $sformatted > ${IMAGE_ROOTFS}/etc/timestamp
287 bbnote "rootfs_update_timestamp: set /etc/timestamp to $sformatted"
288}
289
290# Prevent X from being started
291rootfs_no_x_startup () {
292 if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
293 chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
294 fi
295}
296
297rootfs_trim_schemas () {
298 for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
299 do
300 # Need this in case no files exist
301 if [ -e $schema ]; then
302 oe-trim-schemas $schema > $schema.new
303 mv $schema.new $schema
304 fi
305 done
306}
307
308rootfs_check_host_user_contaminated () {
309 contaminated="${WORKDIR}/host-user-contaminated.txt"
310 HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
311 HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
312
313 find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \
314 -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated"
315
316 sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do
317 bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line`
318 done
319
320 if [ -s "$contaminated" ]; then
321 bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd`
322 bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group`
323 fi
324}
325
326# Make any absolute links in a sysroot relative
327rootfs_sysroot_relativelinks () {
328 sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
329}
330
331# Generate the test data json file
332python write_image_test_data() {
333 from oe.data import export2json
334
335 deploy_dir = d.getVar('IMGDEPLOYDIR')
336 link_name = d.getVar('IMAGE_LINK_NAME')
337 testdata_name = os.path.join(deploy_dir, "%s.testdata.json" % d.getVar('IMAGE_NAME'))
338
339 searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
340 export2json(d, testdata_name, searchString=searchString, replaceString="")
341
342 if os.path.exists(testdata_name) and link_name:
343 testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
344 if os.path.lexists(testdata_link):
345 os.remove(testdata_link)
346 os.symlink(os.path.basename(testdata_name), testdata_link)
347}
348write_image_test_data[vardepsexclude] += "TOPDIR"
349
350# Check for unsatisfied recommendations (RRECOMMENDS)
351python rootfs_log_check_recommends() {
352 log_path = d.expand("${T}/log.do_rootfs")
353 with open(log_path, 'r') as log:
354 for line in log:
355 if 'log_check' in line:
356 continue
357
358 if 'unsatisfied recommendation for' in line:
359 bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line))
360}
361
362# Perform any additional adjustments needed to make the rootfs binary reproducible
363rootfs_reproducible () {
364 if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
365 # Convert UTC into %4Y%2m%2d%2H%2M%2S
366 sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
367 echo $sformatted > ${IMAGE_ROOTFS}/etc/version
368 bbnote "rootfs_reproducible: set /etc/version to $sformatted"
369
370 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
371 find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
372 sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
373 fi
374 fi
375}
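Image recipes can hook into the same ROOTFS_POSTPROCESS_COMMAND mechanism the class uses throughout; a hedged sketch (the function name and its action are illustrative):

    # Hypothetical image recipe fragment -- a sketch only.
    ROOTFS_POSTPROCESS_COMMAND += "clear_motd; "

    clear_motd () {
        # Ship an empty /etc/motd in the generated image.
        : > ${IMAGE_ROOTFS}${sysconfdir}/motd
    }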
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
deleted file mode 100644
index 0469ba7059..0000000000
--- a/meta/classes/rootfs_deb.bbclass
+++ /dev/null
@@ -1,39 +0,0 @@
1#
2# Copyright 2006-2007 Openedhand Ltd.
3#
4
5ROOTFS_PKGMANAGE = "dpkg apt"
6
7do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
8do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
9do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
10do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
11
12do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
13do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
14do_populate_sdk_ext[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
15
16python rootfs_deb_bad_recommendations() {
17 if d.getVar("BAD_RECOMMENDATIONS"):
18 bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
19}
20do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
21
22DEB_POSTPROCESS_COMMANDS = ""
23
24opkglibdir = "${localstatedir}/lib/opkg"
25
26python () {
27 # Map TARGET_ARCH to Debian's ideas about architectures
28 darch = d.getVar('SDK_ARCH')
29 if darch in ["x86", "i486", "i586", "i686", "pentium"]:
30 d.setVar('DEB_SDK_ARCH', 'i386')
31 elif darch == "x86_64":
32 d.setVar('DEB_SDK_ARCH', 'amd64')
33 elif darch == "arm":
34 d.setVar('DEB_SDK_ARCH', 'armel')
35 elif darch == "aarch64":
36 d.setVar('DEB_SDK_ARCH', 'arm64')
37 else:
38 bb.fatal("Unhandled SDK_ARCH %s" % darch)
39}
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
deleted file mode 100644
index 245c256a6f..0000000000
--- a/meta/classes/rootfs_ipk.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
1#
2# Creates a root filesystem out of IPKs
3#
4# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs etc.
5# See image.bbclass for a usage of this.
6#
7
8EXTRAOPKGCONFIG ?= ""
9ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
10
11do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
12do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
13do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
14do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
15
16do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
17do_populate_sdk[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
18do_populate_sdk_ext[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
19
20OPKG_PREPROCESS_COMMANDS = ""
21
22OPKG_POSTPROCESS_COMMANDS = ""
23
24OPKGLIBDIR ??= "${localstatedir}/lib"
25
26MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
27
28python () {
29
30 if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
31 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
32 flags = flags.replace("do_package_write_ipk", "")
33 flags = flags.replace("do_deploy", "")
34 flags = flags.replace("do_populate_sysroot", "")
35 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
36 d.setVar('OPKG_PREPROCESS_COMMANDS', "")
37 d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
38}
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
deleted file mode 100644
index 0af7d65b1a..0000000000
--- a/meta/classes/rootfs_rpm.bbclass
+++ /dev/null
@@ -1,39 +0,0 @@
1#
2# Creates a root filesystem out of rpm packages
3#
4
5ROOTFS_PKGMANAGE = "rpm dnf"
6
7# dnf is using our custom distutils, and so will fail without these
8export STAGING_INCDIR
9export STAGING_LIBDIR
10
11# Add 100Meg of extra space for dnf
12IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
13
14# Dnf is python based, so be sure python3-native is available to us.
15EXTRANATIVEPATH += "python3-native"
16
17# opkg is needed for update-alternatives
18RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
19 dnf-native:do_populate_sysroot \
20 createrepo-c-native:do_populate_sysroot \
21 opkg-native:do_populate_sysroot"
22
23do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
24do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
25
26do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
27do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
28
29python () {
30 if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
31 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
32 flags = flags.replace("do_package_write_rpm", "")
33 flags = flags.replace("do_deploy", "")
34 flags = flags.replace("do_populate_sysroot", "")
35 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
36 d.setVar('RPM_PREPROCESS_COMMANDS', '')
37 d.setVar('RPM_POSTPROCESS_COMMANDS', '')
38
39}
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
deleted file mode 100644
index 85c7ec7434..0000000000
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
1# This class installs additional files found on the build host
2# directly into the rootfs.
3#
4# One use case is to install a constant ssh host key in
5# an image that gets created for just one machine. This
6# solves two issues:
7# - host key generation on the device can stall when the
8# kernel has not gathered enough entropy yet (seen in practice
9# under qemu)
10# - ssh complains by default when the host key changes
11#
12# For dropbear, with the ssh host key stored alongside the local.conf:
13# 1. Extend local.conf:
14# INHERIT += "rootfsdebugfiles"
15# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;"
16# 2. Boot the image once, copy the dropbear_rsa_host_key from
17# the device into your build conf directory.
18# 3. An optional parameter can be used to set the file mode
19# of the copied target, for instance:
20# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key 0600;"
21# in case they might be required to have a specific mode. (Shouldn't be too open, for example)
22#
23# Do not use for production images! It bypasses several
24# core build mechanisms (updating the image when one
25# of the files changes, license tracking in the image
26# manifest, ...).
27
28ROOTFS_DEBUG_FILES ?= ""
29ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
30
31ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
32rootfs_debug_files () {
33 #!/bin/sh -e
34 echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
35 if [ -e "$source" ]; then
36 mkdir -p $(dirname $target)
37 cp -a $source $target
38 [ -n "$mode" ] && chmod $mode $target
39 fi
40 done
41}
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
deleted file mode 100644
index 485173ab48..0000000000
--- a/meta/classes/sanity.bbclass
+++ /dev/null
@@ -1,1054 +0,0 @@
1#
2# Sanity check the users setup for common misconfigurations
3#
4
5SANITY_REQUIRED_UTILITIES ?= "patch diffstat git bzip2 tar \
6 gzip gawk chrpath wget cpio perl file which"
7
8def bblayers_conf_file(d):
9 return os.path.join(d.getVar('TOPDIR'), 'conf/bblayers.conf')
10
11def sanity_conf_read(fn):
12 with open(fn, 'r') as f:
13 lines = f.readlines()
14 return lines
15
16def sanity_conf_find_line(pattern, lines):
17 import re
18 return next(((index, line)
19 for index, line in enumerate(lines)
20 if re.search(pattern, line)), (None, None))
21
22def sanity_conf_update(fn, lines, version_var_name, new_version):
23 index, line = sanity_conf_find_line(r"^%s" % version_var_name, lines)
24 lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
25 with open(fn, "w") as f:
26 f.write(''.join(lines))
27
28# Functions added to this variable MUST throw a NotImplementedError exception unless
29# they successfully changed the config version in the config file. Exceptions
30# are used since exec_func doesn't handle return values.
31BBLAYERS_CONF_UPDATE_FUNCS += " \
32 conf/bblayers.conf:LCONF_VERSION:LAYER_CONF_VERSION:oecore_update_bblayers \
33 conf/local.conf:CONF_VERSION:LOCALCONF_VERSION:oecore_update_localconf \
34 conf/site.conf:SCONF_VERSION:SITE_CONF_VERSION:oecore_update_siteconf \
35"
36
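A minimal sketch of the update-function contract described above, using hypothetical names (update_myconf_sketch, MYCONF_VERSION and MYCONF_REQUIRED_VERSION are illustrative, not part of OE-Core). It assumes the sanity_conf_read()/sanity_conf_update() helpers defined earlier in this class plus a BitBake datastore d; a real updater would be registered in BBLAYERS_CONF_UPDATE_FUNCS:

    def update_myconf_sketch(d):
        # Either rewrite the version in the config file and return silently,
        # or raise NotImplementedError so the user is asked to merge by hand
        # (exceptions are used because exec_func ignores return values).
        fn = os.path.join(d.getVar('TOPDIR'), 'conf/my.conf')
        current = int(d.getVar('MYCONF_VERSION'))
        required = int(d.getVar('MYCONF_REQUIRED_VERSION'))
        if current + 1 == required:
            lines = sanity_conf_read(fn)
            sanity_conf_update(fn, lines, 'MYCONF_VERSION', required)
            bb.note("conf/my.conf has been automatically updated.")
            return
        raise NotImplementedError("conf/my.conf needs manual updates; please compare it "
                                  "against the sample file and merge the changes.")
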
37SANITY_DIFF_TOOL ?= "meld"
38
39SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
40python oecore_update_localconf() {
41 # Check we are using a valid local.conf
42 current_conf = d.getVar('CONF_VERSION')
43 conf_version = d.getVar('LOCALCONF_VERSION')
44
45 failmsg = """Your version of local.conf was generated from an older/newer version of
46local.conf.sample and there have been updates made to this file. Please compare the two
47files and merge any changes before continuing.
48
49Matching the version numbers will remove this message.
50
51\"${SANITY_DIFF_TOOL} conf/local.conf ${SANITY_LOCALCONF_SAMPLE}\"
52
53is a good way to visualise the changes."""
54 failmsg = d.expand(failmsg)
55
56 raise NotImplementedError(failmsg)
57}
58
59SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
60python oecore_update_siteconf() {
61 # If we have a site.conf, check it's valid
62 current_sconf = d.getVar('SCONF_VERSION')
63 sconf_version = d.getVar('SITE_CONF_VERSION')
64
65 failmsg = """Your version of site.conf was generated from an older version of
66site.conf.sample and there have been updates made to this file. Please compare the two
67files and merge any changes before continuing.
68
69Matching the version numbers will remove this message.
70
71\"${SANITY_DIFF_TOOL} conf/site.conf ${SANITY_SITECONF_SAMPLE}\"
72
73is a good way to visualise the changes."""
74 failmsg = d.expand(failmsg)
75
76 raise NotImplementedError(failmsg)
77}
78
79SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
80python oecore_update_bblayers() {
81 # bblayers.conf is out of date, so see if we can resolve that
82
83 current_lconf = int(d.getVar('LCONF_VERSION'))
84 lconf_version = int(d.getVar('LAYER_CONF_VERSION'))
85
86 failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
87Please compare your file against bblayers.conf.sample and merge any changes before continuing.
88"${SANITY_DIFF_TOOL} conf/bblayers.conf ${SANITY_BBLAYERCONF_SAMPLE}"
89
90is a good way to visualise the changes."""
91 failmsg = d.expand(failmsg)
92
93 if not current_lconf:
94 raise NotImplementedError(failmsg)
95
96 lines = []
97
98 if current_lconf < 4:
99 raise NotImplementedError(failmsg)
100
101 bblayers_fn = bblayers_conf_file(d)
102 lines = sanity_conf_read(bblayers_fn)
103
104 if current_lconf == 4 and lconf_version > 4:
105 topdir_var = '$' + '{TOPDIR}'
106 index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
107 if bbpath_line:
108 start = bbpath_line.find('"')
109 if start != -1 and (len(bbpath_line) != (start + 1)):
110 if bbpath_line[start + 1] == '"':
111 lines[index] = (bbpath_line[:start + 1] +
112 topdir_var + bbpath_line[start + 1:])
113 else:
114 if not topdir_var in bbpath_line:
115 lines[index] = (bbpath_line[:start + 1] +
116 topdir_var + ':' + bbpath_line[start + 1:])
117 else:
118 raise NotImplementedError(failmsg)
119 else:
120 index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
121 if bbfiles_line:
122 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
123 else:
124 raise NotImplementedError(failmsg)
125
126 current_lconf += 1
127 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
128 bb.note("Your conf/bblayers.conf has been automatically updated.")
129 return
130
131 elif current_lconf == 5 and lconf_version > 5:
132 # Null update, to avoid issues with people switching between poky and other distros
133 current_lconf = 6
134 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
135 bb.note("Your conf/bblayers.conf has been automatically updated.")
136 return
137
138 status.addresult()
139
140 elif current_lconf == 6 and lconf_version > 6:
141 # Handle rename of meta-yocto -> meta-poky
142 # This marks the start of separate version numbers but code is needed in OE-Core
143 # for the migration, one last time.
144 layers = d.getVar('BBLAYERS').split()
145 layers = [ os.path.basename(path) for path in layers ]
146 if 'meta-yocto' in layers:
147 found = False
148 while True:
149 index, meta_yocto_line = sanity_conf_find_line(r'.*meta-yocto[\'"\s\n]', lines)
150 if meta_yocto_line:
151 lines[index] = meta_yocto_line.replace('meta-yocto', 'meta-poky')
152 found = True
153 else:
154 break
155 if not found:
156 raise NotImplementedError(failmsg)
157 index, meta_yocto_line = sanity_conf_find_line('LCONF_VERSION.*\n', lines)
158 if meta_yocto_line:
159 lines[index] = 'POKY_BBLAYERS_CONF_VERSION = "1"\n'
160 else:
161 raise NotImplementedError(failmsg)
162 with open(bblayers_fn, "w") as f:
163 f.write(''.join(lines))
164 bb.note("Your conf/bblayers.conf has been automatically updated.")
165 return
166 current_lconf += 1
167 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
168 bb.note("Your conf/bblayers.conf has been automatically updated.")
169 return
170
171 raise NotImplementedError(failmsg)
172}
173
174def raise_sanity_error(msg, d, network_error=False):
175 if d.getVar("SANITY_USE_EVENTS") == "1":
176 try:
177 bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
178 except TypeError:
179 bb.event.fire(bb.event.SanityCheckFailed(msg), d)
180 return
181
182 bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
183 Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
184 Following is the list of potential problems / advisories:
185
186 %s""" % msg)
187
188# Check flags associated with a tuning.
189def check_toolchain_tune_args(data, tune, multilib, errs):
190 found_errors = False
191 if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
192 found_errors = True
193 if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
194 found_errors = True
195 if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
196 found_errors = True
197
198 return found_errors
199
200def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
201 args_set = (data.getVar("TUNE_%s" % which) or "").split()
202 args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune)) or "").split()
203 args_missing = []
204
205 # If no args are listed/required, we are done.
206 if not args_wanted:
207 return
208 for arg in args_wanted:
209 if arg not in args_set:
210 args_missing.append(arg)
211
212 found_errors = False
213 if args_missing:
214 found_errors = True
215 tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
216 (tune, ' '.join(args_missing), which, ' '.join(args_set)))
217 return found_errors
218
219# Check a single tune for validity.
220def check_toolchain_tune(data, tune, multilib):
221 tune_errors = []
222 if not tune:
223 return "No tuning found for %s multilib." % multilib
224 localdata = bb.data.createCopy(data)
225 if multilib != "default":
226 # Apply the overrides so we can look at the details.
227 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
228 localdata.setVar("OVERRIDES", overrides)
229 bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
230 features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune) or "").split()
231 if not features:
232 return "Tuning '%s' has no defined features, and cannot be used." % tune
233 valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
234 conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
235 # [doc] is the documentation for the variable, not a real feature
236 if 'doc' in valid_tunes:
237 del valid_tunes['doc']
238 if 'doc' in conflicts:
239 del conflicts['doc']
240 for feature in features:
241 if feature in conflicts:
242 for conflict in conflicts[feature].split():
243 if conflict in features:
244 tune_errors.append("Feature '%s' conflicts with '%s'." %
245 (feature, conflict))
246 if feature in valid_tunes:
247 bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
248 else:
249 tune_errors.append("Feature '%s' is not defined." % feature)
250 whitelist = localdata.getVar("TUNEABI_WHITELIST")
251 if whitelist:
252 tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune)
253 if not tuneabi:
254 tuneabi = tune
255 if True not in [x in whitelist.split() for x in tuneabi.split()]:
256 tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
257 (tune, tuneabi))
258 else:
259 if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
260 bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
261 if tune_errors:
262 return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
263
264def check_toolchain(data):
265 tune_error_set = []
266 deftune = data.getVar("DEFAULTTUNE")
267 tune_errors = check_toolchain_tune(data, deftune, 'default')
268 if tune_errors:
269 tune_error_set.append(tune_errors)
270
271 multilibs = (data.getVar("MULTILIB_VARIANTS") or "").split()
272 global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS") or "").split()
273
274 if multilibs:
275 seen_libs = []
276 seen_tunes = []
277 for lib in multilibs:
278 if lib in seen_libs:
279 tune_error_set.append("The multilib '%s' appears more than once." % lib)
280 else:
281 seen_libs.append(lib)
282 if not lib in global_multilibs:
283 tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
284 tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib)
285 if tune in seen_tunes:
286 tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
287 else:
288 seen_libs.append(tune)
289 if tune == deftune:
290 tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
291 else:
292 tune_errors = check_toolchain_tune(data, tune, lib)
293 if tune_errors:
294 tune_error_set.append(tune_errors)
295 if tune_error_set:
296 return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
297
298 return ""
299
300def check_conf_exists(fn, data):
301 bbpath = []
302 fn = data.expand(fn)
303 vbbpath = data.getVar("BBPATH", False)
304 if vbbpath:
305 bbpath += vbbpath.split(":")
306 for p in bbpath:
307 currname = os.path.join(data.expand(p), fn)
308 if os.access(currname, os.R_OK):
309 return True
310 return False
311
312def check_create_long_filename(filepath, pathname):
313 import string, random
314 testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
315 try:
316 if not os.path.exists(filepath):
317 bb.utils.mkdirhier(filepath)
318 f = open(testfile, "w")
319 f.close()
320 os.remove(testfile)
321 except IOError as e:
322 import errno
323 err, strerror = e.args
324 if err == errno.ENAMETOOLONG:
325 return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
326 else:
327 return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
328 except OSError as e:
329 errno, strerror = e.args
330 return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
331 return ""
332
333def check_path_length(filepath, pathname, limit):
334 if len(filepath) > limit:
335 return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
336 return ""
337
338def get_filesystem_id(path):
339 import subprocess
340 try:
341 return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
342 except subprocess.CalledProcessError:
343 bb.warn("Can't get filesystem id of: %s" % path)
344 return None
345
346# Check that the path isn't located on nfs.
347def check_not_nfs(path, name):
348 # The nfs' filesystem id is 6969
349 if get_filesystem_id(path) == "6969":
350 return "The %s: %s can't be located on nfs.\n" % (name, path)
351 return ""
352
353# Check that the path is on a case-sensitive file system
354def check_case_sensitive(path, name):
355 import tempfile
356 with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
357 if os.path.exists(tmp_file.name.lower()):
358 return "The %s (%s) can't be on a case-insensitive file system.\n" % (name, path)
359 return ""
360
361# Check that path isn't a broken symlink
362def check_symlink(lnk, data):
363 if os.path.islink(lnk) and not os.path.exists(lnk):
364 raise_sanity_error("%s is a broken symlink." % lnk, data)
365
366def check_connectivity(d):
367 # URI's to check can be set in the CONNECTIVITY_CHECK_URIS variable
368 # using the same syntax as for SRC_URI. If the variable is not set
369 # the check is skipped
370 test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS') or "").split()
371 retval = ""
372
373 bbn = d.getVar('BB_NO_NETWORK')
374 if bbn not in (None, '0', '1'):
375 return 'BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn
376
377 # Only check connectivity if network enabled and the
378 # CONNECTIVITY_CHECK_URIS are set
379 network_enabled = not (bbn == '1')
380 check_enabled = len(test_uris)
381 if check_enabled and network_enabled:
382 # Take a copy of the data store and unset MIRRORS and PREMIRRORS
383 data = bb.data.createCopy(d)
384 data.delVar('PREMIRRORS')
385 data.delVar('MIRRORS')
386 try:
387 fetcher = bb.fetch2.Fetch(test_uris, data)
388 fetcher.checkstatus()
389 except Exception as err:
390 # Allow the message to be configured so that users can be
391 # pointed to a support mechanism.
392 msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
393 if len(msg) == 0:
394 msg = "%s.\n" % err
395 msg += " Please ensure your host's network is configured correctly,\n"
396 msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n"
397 msg += " all required sources are on local disk.\n"
398 retval = msg
399
400 return retval
401
402def check_supported_distro(sanity_data):
403 from fnmatch import fnmatch
404
405 tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS')
406 if not tested_distros:
407 return
408
409 try:
410 distro = oe.lsb.distro_identifier()
411 except Exception:
412 distro = None
413
414 if not distro:
415 bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
416
417 for supported in [x.strip() for x in tested_distros.split('\\n')]:
418 if fnmatch(distro, supported):
419 return
420
421 bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
422
423# Checks we should only make if MACHINE is set correctly
424def check_sanity_validmachine(sanity_data):
425 messages = ""
426
427 # Check TUNE_ARCH is set
428 if sanity_data.getVar('TUNE_ARCH') == 'INVALID':
429 messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
430
431 # Check TARGET_OS is set
432 if sanity_data.getVar('TARGET_OS') == 'INVALID':
433 messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
434
435 # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
436 pkgarchs = sanity_data.getVar('PACKAGE_ARCHS')
437 tunepkg = sanity_data.getVar('TUNE_PKGARCH')
438 defaulttune = sanity_data.getVar('DEFAULTTUNE')
439 tunefound = False
440 seen = {}
441 dups = []
442
443 for pa in pkgarchs.split():
444 if seen.get(pa, 0) == 1:
445 dups.append(pa)
446 else:
447 seen[pa] = 1
448 if pa == tunepkg:
449 tunefound = True
450
451 if len(dups):
452 messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
453
454 if tunefound == False:
455 messages = messages + "Error, the PACKAGE_ARCHS variable (%s) for DEFAULTTUNE (%s) does not contain TUNE_PKGARCH (%s)." % (pkgarchs, defaulttune, tunepkg)
456
457 return messages
458
459# Patch before 2.7 can't handle all the features in git-style diffs. Some
460# patches may incorrectly apply, and others won't apply at all.
461def check_patch_version(sanity_data):
462 from distutils.version import LooseVersion
463 import re, subprocess
464
465 try:
466 result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
467 version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
468 if LooseVersion(version) < LooseVersion("2.7"):
469 return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
470 else:
471 return None
472 except subprocess.CalledProcessError as e:
473 return "Unable to execute patch --version, exit code %d:\n%s\n" % (e.returncode, e.output)
474
475# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
476# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
477def check_make_version(sanity_data):
478 from distutils.version import LooseVersion
479 import subprocess
480
481 try:
482 result = subprocess.check_output(['make', '--version'], stderr=subprocess.STDOUT).decode('utf-8')
483 except subprocess.CalledProcessError as e:
484 return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
485 version = result.split()[2]
486 if LooseVersion(version) == LooseVersion("3.82"):
487 # Construct a test file
488 f = open("makefile_test", "w")
489 f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
490 f.write("\n")
491 f.write("makefile_test_a.c:\n")
492 f.write(" touch $@\n")
493 f.write("\n")
494 f.write("makefile_test_b.c:\n")
495 f.write(" touch $@\n")
496 f.close()
497
498 # Check if make 3.82 has been patched
499 try:
500 subprocess.check_call(['make', '-f', 'makefile_test'])
501 except subprocess.CalledProcessError as e:
502 return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n"
503 finally:
504 os.remove("makefile_test")
505 if os.path.exists("makefile_test_a.c"):
506 os.remove("makefile_test_a.c")
507 if os.path.exists("makefile_test_b.c"):
508 os.remove("makefile_test_b.c")
509 if os.path.exists("makefile_test.a"):
510 os.remove("makefile_test.a")
511 return None
512
513
514# Check if we're running on WSL (Windows Subsystem for Linux).
515# WSLv1 is known not to work, but WSLv2 should work properly as
516# long as the VHDX file is optimized often; let the user know
517# upfront.
518# More information on installing WSLv2 at:
519# https://docs.microsoft.com/en-us/windows/wsl/wsl2-install
520def check_wsl(d):
521 with open("/proc/version", "r") as f:
522 verdata = f.readlines()
523 for l in verdata:
524 if "Microsoft" in l:
525 return "OpenEmbedded doesn't work under WSLv1, please upgrade to WSLv2 if you want to run builds on Windows"
526 elif "microsoft" in l:
527 bb.warn("You are running bitbake under WSLv2, this works properly but you should optimize your VHDX file eventually to avoid running out of storage space")
528 return None
529
530# Require at least gcc version 6.0.
531#
532# This can be fixed on CentOS-7 with devtoolset-6+
533# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
534#
535# A less invasive fix is with scripts/install-buildtools (or with user
536# built buildtools-extended-tarball)
537#
538def check_gcc_version(sanity_data):
539 from distutils.version import LooseVersion
540 import subprocess
541
542 build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
543 if build_cc.strip() == "gcc":
544 if LooseVersion(version) < LooseVersion("6.0"):
545 return "Your version of gcc is older than 6.0 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
546 return None
547
548# Tar version 1.24 and onwards handle overwriting symlinks correctly
549# but earlier versions do not; this needs to work properly for sstate
550# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
551def check_tar_version(sanity_data):
552 from distutils.version import LooseVersion
553 import subprocess
554 try:
555 result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
556 except subprocess.CalledProcessError as e:
557 return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
558 version = result.split()[3]
559 if LooseVersion(version) < LooseVersion("1.28"):
560 return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
561 return None
562
563# We use git parameters and functionality only found in 1.7.8 or later
564# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
565# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
566def check_git_version(sanity_data):
567 from distutils.version import LooseVersion
568 import subprocess
569 try:
570 result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
571 except subprocess.CalledProcessError as e:
572 return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
573 version = result.split()[2]
574 if LooseVersion(version) < LooseVersion("1.8.3.1"):
575 return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
576 return None
577
578# Check the required perl modules which may not be installed by default
579def check_perl_modules(sanity_data):
580 import subprocess
581 ret = ""
582 modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
583 errresult = ''
584 for m in modules:
585 try:
586 subprocess.check_output(["perl", "-e", "use %s" % m])
587 except subprocess.CalledProcessError as e:
588 errresult += bytes.decode(e.output)
589 ret += "%s " % m
590 if ret:
591 return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
592 return None
593
594def sanity_check_conffiles(d):
595 funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS').split()
596 for func in funcs:
597 conffile, current_version, required_version, func = func.split(":")
598 if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
599 d.getVar(current_version) != d.getVar(required_version):
600 try:
601 bb.build.exec_func(func, d)
602 except NotImplementedError as e:
603 bb.fatal(str(e))
604 d.setVar("BB_INVALIDCONF", True)
605
606def sanity_handle_abichanges(status, d):
607 #
608 # Check the 'ABI' of TMPDIR
609 #
610 import subprocess
611
612 current_abi = d.getVar('OELAYOUT_ABI')
613 abifile = d.getVar('SANITY_ABIFILE')
614 if os.path.exists(abifile):
615 with open(abifile, "r") as f:
616 abi = f.read().strip()
617 if not abi.isdigit():
618 with open(abifile, "w") as f:
619 f.write(current_abi)
620 elif int(abi) <= 11 and current_abi == "12":
621 status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
622 elif int(abi) <= 13 and current_abi == "14":
623 status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
624
625 elif (abi != current_abi):
626 # Code to convert from one ABI to another could go here if possible.
627 status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
628 else:
629 with open(abifile, "w") as f:
630 f.write(current_abi)
631
632def check_sanity_sstate_dir_change(sstate_dir, data):
633 # Sanity checks to be done when the value of SSTATE_DIR changes
634
635 # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
636 testmsg = ""
637 if sstate_dir != "":
638 testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
639 # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
640 try:
641 err = testmsg.split(': ')[1].strip()
642 if err == "Permission denied.":
643 testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
644 except IndexError:
645 pass
646 return testmsg
647
648def check_sanity_version_change(status, d):
649 # Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
650 # In other words, these tests run once in a given build directory and then
651 # never again until the sanity version or host distribution id/version changes.
652
653 # Check the python install is complete. Examples that are often removed in
654 # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
655 # requires distutils.sysconfig.
656 try:
657 import xml.parsers.expat
658 import distutils.sysconfig
659 except ImportError as e:
660 status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
661
662 status.addresult(check_gcc_version(d))
663 status.addresult(check_make_version(d))
664 status.addresult(check_patch_version(d))
665 status.addresult(check_tar_version(d))
666 status.addresult(check_git_version(d))
667 status.addresult(check_perl_modules(d))
668 status.addresult(check_wsl(d))
669
670 missing = ""
671
672 if not check_app_exists("${MAKE}", d):
673 missing = missing + "GNU make,"
674
675 if not check_app_exists('${BUILD_CC}', d):
676 missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC")
677
678 if not check_app_exists('${BUILD_CXX}', d):
679 missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX")
680
681 required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES')
682
683 for util in required_utilities.split():
684 if not check_app_exists(util, d):
685 missing = missing + "%s," % util
686
687 if missing:
688 missing = missing.rstrip(',')
689 status.addresult("Please install the following missing utilities: %s\n" % missing)
690
691 assume_provided = d.getVar('ASSUME_PROVIDED').split()
692 # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
693 if "diffstat-native" not in assume_provided:
694 status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
695
696 # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
697 import stat
698 tmpdir = d.getVar('TMPDIR')
699 status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
700 tmpdirmode = os.stat(tmpdir).st_mode
701 if (tmpdirmode & stat.S_ISGID):
702 status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
703 if (tmpdirmode & stat.S_ISUID):
704 status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
705
706 # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
707 pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
708 workdir = d.getVar('WORKDIR', expand=True)
709 for i in pseudoignorepaths:
710 if i and workdir.startswith(i):
711 status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + " please locate the build outside this path.\n")
712
713 # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
714 pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
715 pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
716 pseudocontroldir = d.expand(pseudo_control_dir).split(",")
717 for i in pseudoignorepaths:
718 for j in pseudocontroldir:
719 if i and j:
720 if j.startswith(i):
721 status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo controlled directories. \n")
722
723 # Some third-party software apparently relies on chmod etc. being suid root (!!)
724 import stat
725 suid_check_bins = "chown chmod mknod".split()
726 for bin_cmd in suid_check_bins:
727 bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
728 if bin_path:
729 bin_stat = os.stat(bin_path)
730 if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
731 status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
732
733 # Check that we can fetch from various network transports
734 netcheck = check_connectivity(d)
735 status.addresult(netcheck)
736 if netcheck:
737 status.network_error = True
738
739 nolibs = d.getVar('NO32LIBS')
740 if not nolibs:
741 lib32path = '/lib'
742 if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
743 lib32path = '/lib32'
744
745 if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
746 status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
747
748 bbpaths = d.getVar('BBPATH').split(":")
749 if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
750 status.addresult("BBPATH references the current directory, either through " \
751 "an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
752 "layer configuration is adding empty elements to BBPATH.\n\t "\
753 "Please check your layer.conf files and other BBPATH " \
754 "settings to remove the current working directory " \
755 "references.\n" \
756 "Parsed BBPATH is" + str(bbpaths));
757
758 oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF')
759 if not oes_bb_conf:
760 status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
761
762 # The length of TMPDIR can't exceed 410 characters
763 status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
764
765 # Check that TMPDIR isn't located on nfs
766 status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
767
768 # Check for case-insensitive file systems (such as Linux in Docker on
769 # macOS with default HFS+ file system)
770 status.addresult(check_case_sensitive(tmpdir, "TMPDIR"))
771
772def sanity_check_locale(d):
773 """
774 Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
775 """
776 import locale
777 try:
778 locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
779 except locale.Error:
780 raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
781
782def check_sanity_everybuild(status, d):
783 import os, stat
784 # Sanity tests which test the user's environment and so need to run at each build (or are so cheap
785 # it makes sense to always run them).
786
787 if 0 == os.getuid():
788 raise_sanity_error("Do not use Bitbake as root.", d)
789
790 # Check the Python version, we now have a minimum of Python 3.6
791 import sys
792 if sys.hexversion < 0x030600F0:
793 status.addresult('The system requires at least Python 3.6 to run. Please update your Python interpreter.\n')
794
795 # Check the bitbake version meets minimum requirements
796 from distutils.version import LooseVersion
797 minversion = d.getVar('BB_MIN_VERSION')
798 if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
799 status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
800
801 sanity_check_locale(d)
802
803 paths = d.getVar('PATH').split(":")
804 if "." in paths or "./" in paths or "" in paths:
805 status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
806
807 #Check if bitbake is present in PATH environment variable
808 bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
809 if not bb_check:
810 bb.warn("bitbake binary is not found in PATH, did you source the script?")
811
812 # Check whether the 'inherit' directive is found (used for a class to inherit);
813 # in a conf file it's supposed to be the uppercase INHERIT
814 inherit = d.getVar('inherit')
815 if inherit:
816 status.addresult("Please don't use the inherit directive in your local.conf. The directive is supposed to be used in classes and recipes only to inherit from bbclasses. Here INHERIT should be used.\n")
817
818 # Check that the DISTRO is valid, if set
819 # need to take into account DISTRO renaming DISTRO
820 distro = d.getVar('DISTRO')
821 if distro and distro != "nodistro":
822 if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
823 status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
824
825 # Check that these variables don't use tilde-expansion as we don't do that
826 for v in ("TMPDIR", "DL_DIR", "SSTATE_DIR"):
827 if d.getVar(v).startswith("~"):
828 status.addresult("%s uses ~ but Bitbake will not expand this, use an absolute path or variables." % v)
829
830 # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
831 # set, since so much relies on it being set.
832 dldir = d.getVar('DL_DIR')
833 if not dldir:
834 status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
835 if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
836 status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
837 check_symlink(dldir, d)
838
839 # Check that the MACHINE is valid, if it is set
840 machinevalid = True
841 if d.getVar('MACHINE'):
842 if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
843 status.addresult('MACHINE=%s is invalid. Please set a valid MACHINE in your local.conf, environment or other configuration file.\n' % (d.getVar('MACHINE')))
844 machinevalid = False
845 else:
846 status.addresult(check_sanity_validmachine(d))
847 else:
848 status.addresult('Please set a MACHINE in your local.conf or environment\n')
849 machinevalid = False
850 if machinevalid:
851 status.addresult(check_toolchain(d))
852
853 # Check that the SDKMACHINE is valid, if it is set
854 if d.getVar('SDKMACHINE'):
855 if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
856 status.addresult('Specified SDKMACHINE value is not valid\n')
857 elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
858 status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
859
860 # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
861 sdkvendor = d.getVar("SDK_VENDOR")
862 if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
863 status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash; found '%s'\n" % sdkvendor)
864
865 check_supported_distro(d)
866
867 omask = os.umask(0o022)
868 if omask & 0o755:
869 status.addresult("Please use a umask which allows a+rx and u+rwx\n")
870 os.umask(omask)
871
872 if d.getVar('TARGET_ARCH') == "arm":
873 # This path is no longer user-readable in modern (very recent) Linux
874 try:
875 if os.path.exists("/proc/sys/vm/mmap_min_addr"):
876 f = open("/proc/sys/vm/mmap_min_addr", "r")
877 try:
878 if (int(f.read().strip()) > 65536):
879 status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
880 finally:
881 f.close()
882 except:
883 pass
884
885 oeroot = d.getVar('COREBASE')
886 if oeroot.find('+') != -1:
887 status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
888 if oeroot.find('@') != -1:
889 status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
890 if oeroot.find(' ') != -1:
891 status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
892
893 # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
894 import re
895 mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
896 protocols = ['http', 'ftp', 'file', 'https', \
897 'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
898 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3' ]
899 for mirror_var in mirror_vars:
900 mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
901
902 # Split into pairs
903 if len(mirrors) % 2 != 0:
904 bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, str(mirrors)))
905 continue
906 mirrors = list(zip(*[iter(mirrors)]*2))
907
908 for mirror_entry in mirrors:
909 pattern, mirror = mirror_entry
910
911 decoded = bb.fetch2.decodeurl(pattern)
912 try:
913 pattern_scheme = re.compile(decoded[0])
914 except re.error as exc:
915 bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
916 continue
917
918 if not any(pattern_scheme.match(protocol) for protocol in protocols):
919 bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
920 continue
921
922 if not any(mirror.startswith(protocol + '://') for protocol in protocols):
923 bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
924 continue
925
926 if mirror.startswith('file://'):
927 import urllib
928 check_symlink(urllib.parse.urlparse(mirror).path, d)
929 # SSTATE_MIRRORS entries can end with a /PATH string
930 if mirror.endswith('/PATH'):
931 # remove /PATH$ from SSTATE_MIRROR to get a working
932 # base directory path
933 mirror_base = urllib.parse.urlparse(mirror[:-1*len('/PATH')]).path
934 check_symlink(mirror_base, d)
935
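    The pairing a few lines above turns the flattened "regex mirror regex mirror ..." list into tuples with a standard Python idiom; a standalone illustration with made-up entries:

        entries = "https://.*/.* http://example.mirror/downloads file://.* file:///srv/mirror".split()
        pairs = list(zip(*[iter(entries)] * 2))
        print(pairs)  # -> [('https://.*/.*', 'http://example.mirror/downloads'), ('file://.*', 'file:///srv/mirror')]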
936 # Check that TMPDIR hasn't changed location since the last time we were run
937 tmpdir = d.getVar('TMPDIR')
938 checkfile = os.path.join(tmpdir, "saved_tmpdir")
939 if os.path.exists(checkfile):
940 with open(checkfile, "r") as f:
941 saved_tmpdir = f.read().strip()
942 if (saved_tmpdir != tmpdir):
943 status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
944 else:
945 bb.utils.mkdirhier(tmpdir)
946 # Remove setuid, setgid and sticky bits from TMPDIR
947 try:
948 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
949 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
950 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
951 except OSError as exc:
952 bb.warn("Unable to chmod TMPDIR: %s" % exc)
953 with open(checkfile, "w") as f:
954 f.write(tmpdir)
955
956 # If /bin/sh is a symlink, check that it points to dash or bash
957 if os.path.islink('/bin/sh'):
958 real_sh = os.path.realpath('/bin/sh')
959 # Due to update-alternatives, the shell name may take various
960 # forms, such as /bin/dash, /bin/bash, /bin/bash.bash ...
961 if '/dash' not in real_sh and '/bash' not in real_sh:
962 status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
963
964def check_sanity(sanity_data):
965 class SanityStatus(object):
966 def __init__(self):
967 self.messages = ""
968 self.network_error = False
969
970 def addresult(self, message):
971 if message:
972 self.messages = self.messages + message
973
974 status = SanityStatus()
975
976 tmpdir = sanity_data.getVar('TMPDIR')
977 sstate_dir = sanity_data.getVar('SSTATE_DIR')
978
979 check_symlink(sstate_dir, sanity_data)
980
981 # Check saved sanity info
982 last_sanity_version = 0
983 last_tmpdir = ""
984 last_sstate_dir = ""
985 last_nativelsbstr = ""
986 sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
987 if os.path.exists(sanityverfile):
988 with open(sanityverfile, 'r') as f:
989 for line in f:
990 if line.startswith('SANITY_VERSION'):
991 last_sanity_version = int(line.split()[1])
992 if line.startswith('TMPDIR'):
993 last_tmpdir = line.split()[1]
994 if line.startswith('SSTATE_DIR'):
995 last_sstate_dir = line.split()[1]
996 if line.startswith('NATIVELSBSTRING'):
997 last_nativelsbstr = line.split()[1]
998
999 check_sanity_everybuild(status, sanity_data)
1000
1001 sanity_version = int(sanity_data.getVar('SANITY_VERSION') or 1)
1002 network_error = False
1003 # NATIVELSBSTRING var may have been overridden with "universal", so
1004 # get actual host distribution id and version
1005 nativelsbstr = lsb_distro_identifier(sanity_data)
1006 if last_sanity_version < sanity_version or last_nativelsbstr != nativelsbstr:
1007 check_sanity_version_change(status, sanity_data)
1008 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
1009 else:
1010 if last_sstate_dir != sstate_dir:
1011 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
1012
1013 if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
1014 with open(sanityverfile, 'w') as f:
1015 f.write("SANITY_VERSION %s\n" % sanity_version)
1016 f.write("TMPDIR %s\n" % tmpdir)
1017 f.write("SSTATE_DIR %s\n" % sstate_dir)
1018 f.write("NATIVELSBSTRING %s\n" % nativelsbstr)
1019
1020 sanity_handle_abichanges(status, sanity_data)
1021
1022 if status.messages != "":
1023 raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
1024
1025# Create a copy of the datastore and finalise it to ensure appends and
1026# overrides are set - the datastore has yet to be finalised at ConfigParsed
1027def copy_data(e):
1028 sanity_data = bb.data.createCopy(e.data)
1029 sanity_data.finalize()
1030 return sanity_data
1031
1032addhandler config_reparse_eventhandler
1033config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
1034python config_reparse_eventhandler() {
1035 sanity_check_conffiles(e.data)
1036}
1037
1038addhandler check_sanity_eventhandler
1039check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
1040python check_sanity_eventhandler() {
1041 if bb.event.getName(e) == "SanityCheck":
1042 sanity_data = copy_data(e)
1043 check_sanity(sanity_data)
1044 if e.generateevents:
1045 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1046 bb.event.fire(bb.event.SanityCheckPassed(), e.data)
1047 elif bb.event.getName(e) == "NetworkTest":
1048 sanity_data = copy_data(e)
1049 if e.generateevents:
1050 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1051 bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
1052
1053 return
1054}
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
deleted file mode 100644
index 4f3ae502ef..0000000000
--- a/meta/classes/scons.bbclass
+++ /dev/null
@@ -1,28 +0,0 @@
1inherit python3native
2
3DEPENDS += "python3-scons-native"
4
5EXTRA_OESCONS ?= ""
6
7do_configure() {
8 if [ -n "${CONFIGURESTAMPFILE}" ]; then
9 if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
10 ${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
11 fi
12
13 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
14 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
15 fi
16}
17
18scons_do_compile() {
19 ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
20 die "scons build execution failed."
21}
22
23scons_do_install() {
24 ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
25 die "scons install execution failed."
26}
27
28EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
deleted file mode 100644
index 8ca66ee708..0000000000
--- a/meta/classes/setuptools3.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
1inherit distutils3
2
3DEPENDS += "python3-setuptools-native"
4
diff --git a/meta/classes/sign_ipk.bbclass b/meta/classes/sign_ipk.bbclass
index e5057b7799..51c24b38b2 100644
--- a/meta/classes/sign_ipk.bbclass
+++ b/meta/classes/sign_ipk.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Class for generating signed IPK packages. 7# Class for generating signed IPK packages.
2# 8#
3# Configuration variables used by this class: 9# Configuration variables used by this class:
diff --git a/meta/classes/sign_package_feed.bbclass b/meta/classes/sign_package_feed.bbclass
index 7ff3a35a2f..e9d664750c 100644
--- a/meta/classes/sign_package_feed.bbclass
+++ b/meta/classes/sign_package_feed.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Class for signing package feeds 7# Class for signing package feeds
2# 8#
3# Related configuration variables that will be used after this class is 9# Related configuration variables that will be used after this class is
@@ -27,9 +33,10 @@ inherit sanity
27PACKAGE_FEED_SIGN = '1' 33PACKAGE_FEED_SIGN = '1'
28PACKAGE_FEED_GPG_BACKEND ?= 'local' 34PACKAGE_FEED_GPG_BACKEND ?= 'local'
29PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC' 35PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
36PACKAGEINDEXDEPS += "gnupg-native:do_populate_sysroot"
30 37
31# Make feed signing key to be present in rootfs 38# Make feed signing key to be present in rootfs
32FEATURE_PACKAGES_package-management_append = " signing-keys-packagefeed" 39FEATURE_PACKAGES_package-management:append = " signing-keys-packagefeed"
33 40
34python () { 41python () {
35 # Check sanity of configuration 42 # Check sanity of configuration
diff --git a/meta/classes/sign_rpm.bbclass b/meta/classes/sign_rpm.bbclass
index 73a55a512d..ee0c4808fa 100644
--- a/meta/classes/sign_rpm.bbclass
+++ b/meta/classes/sign_rpm.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Class for generating signed RPM packages. 7# Class for generating signed RPM packages.
2# 8#
3# Configuration variables used by this class: 9# Configuration variables used by this class:
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
index 0cfa5a6834..953cafd285 100644
--- a/meta/classes/siteconfig.bbclass
+++ b/meta/classes/siteconfig.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1python siteconfig_do_siteconfig () { 7python siteconfig_do_siteconfig () {
2 shared_state = sstate_state_fromvars(d) 8 shared_state = sstate_state_fromvars(d)
3 if shared_state['task'] != 'populate_sysroot': 9 if shared_state['task'] != 'populate_sysroot':
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
deleted file mode 100644
index 0bd1f36805..0000000000
--- a/meta/classes/siteinfo.bbclass
+++ /dev/null
@@ -1,204 +0,0 @@
1# This class exists to provide information about the targets that
2# may be needed by other classes and/or recipes. If you add a new
3# target this will probably need to be updated.
4
5#
6# Returns information about 'what' for the named target 'target'
7# where 'target' == "<arch>-<os>"
8#
9# 'what' can be one of
10# * target: Returns the target name ("<arch>-<os>")
11# * endianness: Return "be" for big endian targets, "le" for little endian
12# * bits: Returns the bit size of the target, either "32" or "64"
13# * libc: Returns the name of the c library used by the target
14#
15# It is an error for the target not to exist.
16# If 'what' doesn't exist then an empty value is returned
17#
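As a worked example of the lookup described above, a hypothetical call siteinfo_data_for_machine("aarch64", "linux-gnu", d) would combine the table entries defined below (illustrative only):

    # archinfo["aarch64"] + osinfo["linux-gnu"] + targetinfo["aarch64-linux-gnu"],
    # then the target string itself and "common" are appended:
    example_sitedata = ("endian-little bit-64 arm-common arm-64".split()
                        + "common-linux common-glibc".split()
                        + ["aarch64-linux"]
                        + ["aarch64-linux-gnu", "common"])
    print(example_sitedata)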
18def siteinfo_data_for_machine(arch, os, d):
19 archinfo = {
20 "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
21 "aarch64": "endian-little bit-64 arm-common arm-64",
22 "aarch64_be": "endian-big bit-64 arm-common arm-64",
23 "arc": "endian-little bit-32 arc-common",
24 "arceb": "endian-big bit-32 arc-common",
25 "arm": "endian-little bit-32 arm-common arm-32",
26 "armeb": "endian-big bit-32 arm-common arm-32",
27 "avr32": "endian-big bit-32 avr32-common",
28 "bfin": "endian-little bit-32 bfin-common",
29 "epiphany": "endian-little bit-32",
30 "i386": "endian-little bit-32 ix86-common",
31 "i486": "endian-little bit-32 ix86-common",
32 "i586": "endian-little bit-32 ix86-common",
33 "i686": "endian-little bit-32 ix86-common",
34 "ia64": "endian-little bit-64",
35 "lm32": "endian-big bit-32",
36 "m68k": "endian-big bit-32",
37 "microblaze": "endian-big bit-32 microblaze-common",
38 "microblazeel": "endian-little bit-32 microblaze-common",
39 "mips": "endian-big bit-32 mips-common",
40 "mips64": "endian-big bit-64 mips-common",
41 "mips64el": "endian-little bit-64 mips-common",
42 "mipsisa64r6": "endian-big bit-64 mips-common",
43 "mipsisa64r6el": "endian-little bit-64 mips-common",
44 "mipsel": "endian-little bit-32 mips-common",
45 "mipsisa32r6": "endian-big bit-32 mips-common",
46 "mipsisa32r6el": "endian-little bit-32 mips-common",
47 "powerpc": "endian-big bit-32 powerpc-common",
48 "powerpcle": "endian-little bit-32 powerpc-common",
49 "nios2": "endian-little bit-32 nios2-common",
50 "powerpc64": "endian-big bit-64 powerpc-common",
51 "powerpc64le": "endian-little bit-64 powerpc-common",
52 "ppc": "endian-big bit-32 powerpc-common",
53 "ppc64": "endian-big bit-64 powerpc-common",
54 "ppc64le" : "endian-little bit-64 powerpc-common",
55 "riscv32": "endian-little bit-32 riscv-common",
56 "riscv64": "endian-little bit-64 riscv-common",
57 "sh3": "endian-little bit-32 sh-common",
58 "sh3eb": "endian-big bit-32 sh-common",
59 "sh4": "endian-little bit-32 sh-common",
60 "sh4eb": "endian-big bit-32 sh-common",
61 "sparc": "endian-big bit-32",
62 "viac3": "endian-little bit-32 ix86-common",
63 "x86_64": "endian-little", # bitinfo specified in targetinfo
64 }
65 osinfo = {
66 "darwin": "common-darwin",
67 "darwin9": "common-darwin",
68 "linux": "common-linux common-glibc",
69 "linux-gnu": "common-linux common-glibc",
70 "linux-gnu_ilp32": "common-linux common-glibc",
71 "linux-gnux32": "common-linux common-glibc",
72 "linux-gnun32": "common-linux common-glibc",
73 "linux-gnueabi": "common-linux common-glibc",
74 "linux-gnuspe": "common-linux common-glibc",
75 "linux-musl": "common-linux common-musl",
76 "linux-muslx32": "common-linux common-musl",
77 "linux-musleabi": "common-linux common-musl",
78 "linux-muslspe": "common-linux common-musl",
79 "uclinux-uclibc": "common-uclibc",
80 "cygwin": "common-cygwin",
81 "mingw32": "common-mingw",
82 }
83 targetinfo = {
84 "aarch64-linux-gnu": "aarch64-linux",
85 "aarch64_be-linux-gnu": "aarch64_be-linux",
86 "aarch64-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
87 "aarch64_be-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
88 "aarch64-linux-musl": "aarch64-linux",
89 "aarch64_be-linux-musl": "aarch64_be-linux",
90 "arm-linux-gnueabi": "arm-linux",
91 "arm-linux-musleabi": "arm-linux",
92 "armeb-linux-gnueabi": "armeb-linux",
93 "armeb-linux-musleabi": "armeb-linux",
94 "microblazeel-linux" : "microblaze-linux",
95 "microblazeel-linux-musl" : "microblaze-linux",
96 "mips-linux-musl": "mips-linux",
97 "mipsel-linux-musl": "mipsel-linux",
98 "mips64-linux-musl": "mips64-linux",
99 "mips64el-linux-musl": "mips64el-linux",
100 "mips64-linux-gnun32": "mips-linux bit-32",
101 "mips64el-linux-gnun32": "mipsel-linux bit-32",
102 "mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
103 "mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
104 "powerpc-linux": "powerpc32-linux",
105 "powerpc-linux-musl": "powerpc-linux powerpc32-linux",
106 "powerpcle-linux": "powerpc32-linux",
107 "powerpcle-linux-musl": "powerpc-linux powerpc32-linux",
108 "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
109 "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
110 "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
111 "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
112 "powerpc64-linux": "powerpc-linux powerpc64-linux",
113 "powerpc64-linux-musl": "powerpc-linux powerpc64-linux",
114 "powerpc64le-linux": "powerpc-linux powerpc64-linux",
115 "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux",
116 "riscv32-linux": "riscv32-linux",
117 "riscv32-linux-musl": "riscv32-linux",
118 "riscv64-linux": "riscv64-linux",
119 "riscv64-linux-musl": "riscv64-linux",
120 "x86_64-cygwin": "bit-64",
121 "x86_64-darwin": "bit-64",
122 "x86_64-darwin9": "bit-64",
123 "x86_64-linux": "bit-64",
124 "x86_64-linux-musl": "x86_64-linux bit-64",
125 "x86_64-linux-muslx32": "bit-32 ix86-common x32-linux",
126 "x86_64-elf": "bit-64",
127 "x86_64-linux-gnu": "bit-64 x86_64-linux",
128 "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
129 "x86_64-mingw32": "bit-64",
130 }
131
132 # Add in any extra user supplied data which may come from a BSP layer, removing the
133 # need to always change this class directly
134 extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS") or "").split()
135 for m in extra_siteinfo:
136 call = m + "(archinfo, osinfo, targetinfo, d)"
137 locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
138 archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
139
140 target = "%s-%s" % (arch, os)
141
142 sitedata = []
143 if arch in archinfo:
144 sitedata.extend(archinfo[arch].split())
145 if os in osinfo:
146 sitedata.extend(osinfo[os].split())
147 if target in targetinfo:
148 sitedata.extend(targetinfo[target].split())
149 sitedata.append(target)
150 sitedata.append("common")
151
152 bb.debug(1, "SITE files %s" % sitedata);
153 return sitedata
154
155def siteinfo_data(d):
156 return siteinfo_data_for_machine(d.getVar("HOST_ARCH"), d.getVar("HOST_OS"), d)
157
158python () {
159 sitedata = set(siteinfo_data(d))
160 if "endian-little" in sitedata:
161 d.setVar("SITEINFO_ENDIANNESS", "le")
162 elif "endian-big" in sitedata:
163 d.setVar("SITEINFO_ENDIANNESS", "be")
164 else:
165 bb.error("Unable to determine endianness for architecture '%s'" %
166 d.getVar("HOST_ARCH"))
167 bb.fatal("Please add your architecture to siteinfo.bbclass")
168
169 if "bit-32" in sitedata:
170 d.setVar("SITEINFO_BITS", "32")
171 elif "bit-64" in sitedata:
172 d.setVar("SITEINFO_BITS", "64")
173 else:
174 bb.error("Unable to determine bit size for architecture '%s'" %
175 d.getVar("HOST_ARCH"))
176 bb.fatal("Please add your architecture to siteinfo.bbclass")
177}
178
179def siteinfo_get_files(d, sysrootcache = False):
180 sitedata = siteinfo_data(d)
181 sitefiles = ""
182 for path in d.getVar("BBPATH").split(":"):
183 for element in sitedata:
184 filename = os.path.join(path, "site", element)
185 if os.path.exists(filename):
186 sitefiles += filename + " "
187
188 if not sysrootcache:
189 return sitefiles
190
191 # Now check for siteconfig cache files in sysroots
192 path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
193 if path_siteconfig and os.path.isdir(path_siteconfig):
194 for i in os.listdir(path_siteconfig):
195 if not i.endswith("_config"):
196 continue
197 filename = os.path.join(path_siteconfig, i)
198 sitefiles += filename + " "
199 return sitefiles
200
201#
202# Make some information available via variables
203#
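# siteinfo_get_files() scans SITECONFIG_SYSROOTCACHE for staged *_config cache files
# when sysrootcache is requested.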
204SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
deleted file mode 100644
index f579168162..0000000000
--- a/meta/classes/sstate.bbclass
+++ /dev/null
@@ -1,1225 +0,0 @@
1SSTATE_VERSION = "3"
2
3SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
4SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
5
6def generate_sstatefn(spec, hash, taskname, siginfo, d):
7 if taskname is None:
8 return ""
9 extension = ".tgz"
10 # 8 chars reserved for siginfo
11 limit = 254 - 8
12 if siginfo:
13 limit = 254
14 extension = ".tgz.siginfo"
15 if not hash:
16 hash = "INVALID"
17 fn = spec + hash + "_" + taskname + extension
18 # If the filename is too long, attempt to reduce it
19 if len(fn) > limit:
20 components = spec.split(":")
21 # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
22 # 7 is for the separators
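        # (per SSTATE_PKGSPEC the fields are: "sstate", PN, ${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}, PV, PR, SSTATE_PKGARCH, SSTATE_VERSION)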
23 avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
24 components[2] = components[2][:avail]
25 components[3] = components[3][:avail]
26 components[4] = components[4][:avail]
27 spec = ":".join(components)
28 fn = spec + hash + "_" + taskname + extension
29 if len(fn) > limit:
30            bb.fatal("Unable to reduce sstate name to less than 255 characters")
31 return hash[:2] + "/" + hash[2:4] + "/" + fn
32
33SSTATE_PKGARCH = "${PACKAGE_ARCH}"
34SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
35SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
36SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
37SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
38SSTATE_EXTRAPATH = ""
39SSTATE_EXTRAPATHWILDCARD = ""
40SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tgz*"
41
42# explicitly make PV depend on the evaluated value of the PV variable
43PV[vardepvalue] = "${PV}"
44
45# We don't want the sstate to depend on things like the distro string
46# of the system; we let the sstate paths take care of this.
47SSTATE_EXTRAPATH[vardepvalue] = ""
48SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
49
50# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
51SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
52# Avoid docbook/sgml catalog warnings for now
53SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
54# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
55SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
56SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
57# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
58SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
59# Archive the sources for many architectures in one deploy folder
60SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
61# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
62SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
63SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
64SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
65SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
66
67SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
68SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
69SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
70
71BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
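# Fields: <"True" if the sstate object lives under a NATIVELSBSTRING subdirectory>
# <SSTATE_PKGSPEC> <SSTATE_SWSPEC>; parsed back out by getpathcomponents() in
# sstate_checkhashes() below.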
72
73SSTATE_ARCHS = " \
74 ${BUILD_ARCH} \
75 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
76 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
77 ${BUILD_ARCH}_${TARGET_ARCH} \
78 ${SDK_ARCH}_${SDK_OS} \
79 ${SDK_ARCH}_${PACKAGE_ARCH} \
80 allarch \
81 ${PACKAGE_ARCH} \
82 ${PACKAGE_EXTRA_ARCHS} \
83 ${MACHINE_ARCH}"
84SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
85
86SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
87
88SSTATECREATEFUNCS = "sstate_hardcode_path"
89SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
90SSTATEPOSTCREATEFUNCS = ""
91SSTATEPREINSTFUNCS = ""
92SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
93SSTATEPOSTINSTFUNCS = ""
94EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
95
96# Check whether sstate exists for tasks that support sstate and are in the
97# locked signatures file.
98SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
99
100# Check whether the task's computed hash matches the task's hash in the
101# locked signatures file.
102SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
103
104# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
105# not sign)
106SSTATE_SIG_KEY ?= ""
107SSTATE_SIG_PASSPHRASE ?= ""
108# Whether to verify the GnuPG signatures when extracting sstate archives
109SSTATE_VERIFY_SIG ?= "0"
110
111SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
112SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
113 the output hash for a task, which in turn is used to determine equivalency. \
114 "
115
116SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
117SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
118 hash equivalency server, such as PN, PV, taskname, etc. This information \
119 is very useful for developers looking at task data, but may leak sensitive \
120 data if the equivalence server is public. \
121 "
122
123python () {
124 if bb.data.inherits_class('native', d):
125 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
126 if d.getVar("PN") == "pseudo-native":
127 d.appendVar('SSTATE_PKGARCH', '_${ORIGNATIVELSBSTRING}')
128 elif bb.data.inherits_class('crosssdk', d):
129 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
130 elif bb.data.inherits_class('cross', d):
131 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
132 elif bb.data.inherits_class('nativesdk', d):
133 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
134 elif bb.data.inherits_class('cross-canadian', d):
135 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
136 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
137 d.setVar('SSTATE_PKGARCH', "allarch")
138 else:
139 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
140
141 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
142 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
143 d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
144 d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
145
146 unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
147 d.setVar('SSTATETASKS', " ".join(unique_tasks))
148 for task in unique_tasks:
149 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
150 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
151}
152
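# A class opts a task into shared state by adding it to SSTATETASKS and setting the
# sstate-inputdirs/sstate-outputdirs varflags read by sstate_state_fromvars() below,
# plus a setscene task which calls sstate_setscene(). A minimal sketch with
# hypothetical names (see do_populate_sysroot in staging.bbclass for a real user):
#
#   SSTATETASKS += "do_deploy_example"
#   do_deploy_example[sstate-inputdirs] = "${WORKDIR}/deploy-example"
#   do_deploy_example[sstate-outputdirs] = "${DEPLOY_DIR}/example"
#   python do_deploy_example_setscene () {
#       sstate_setscene(d)
#   }
#   addtask do_deploy_example_setscene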
153def sstate_init(task, d):
154 ss = {}
155 ss['task'] = task
156 ss['dirs'] = []
157 ss['plaindirs'] = []
158 ss['lockfiles'] = []
159 ss['lockfiles-shared'] = []
160 return ss
161
162def sstate_state_fromvars(d, task = None):
163 if task is None:
164 task = d.getVar('BB_CURRENTTASK')
165 if not task:
166 bb.fatal("sstate code running without task context?!")
167 task = task.replace("_setscene", "")
168
169 if task.startswith("do_"):
170 task = task[3:]
171 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
172 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
173 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
174 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
175 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
176 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
177 fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
178 if not task or len(inputs) != len(outputs):
179 bb.fatal("sstate variables not setup correctly?!")
180
181 if task == "populate_lic":
182 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
183 d.setVar("SSTATE_EXTRAPATH", "")
184 d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
185
186 ss = sstate_init(task, d)
187 for i in range(len(inputs)):
188 sstate_add(ss, inputs[i], outputs[i], d)
189 ss['lockfiles'] = lockfiles
190 ss['lockfiles-shared'] = lockfilesshared
191 ss['plaindirs'] = plaindirs
192 ss['interceptfuncs'] = interceptfuncs
193 ss['fixmedir'] = fixmedir
194 return ss
195
196def sstate_add(ss, source, dest, d):
197 if not source.endswith("/"):
198 source = source + "/"
199 if not dest.endswith("/"):
200 dest = dest + "/"
201 source = os.path.normpath(source)
202 dest = os.path.normpath(dest)
203 srcbase = os.path.basename(source)
204 ss['dirs'].append([srcbase, source, dest])
205 return ss
206
207def sstate_install(ss, d):
208 import oe.path
209 import oe.sstatesig
210 import subprocess
211
212 sharedfiles = []
213 shareddirs = []
214 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
215
216 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
217
218 manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
219
220 if os.access(manifest, os.R_OK):
221 bb.fatal("Package already staged (%s)?!" % manifest)
222
223 d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
224
225 locks = []
226 for lock in ss['lockfiles-shared']:
227 locks.append(bb.utils.lockfile(lock, True))
228 for lock in ss['lockfiles']:
229 locks.append(bb.utils.lockfile(lock))
230
231 for state in ss['dirs']:
232 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
233 for walkroot, dirs, files in os.walk(state[1]):
234 for file in files:
235 srcpath = os.path.join(walkroot, file)
236 dstpath = srcpath.replace(state[1], state[2])
237 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
238 sharedfiles.append(dstpath)
239 for dir in dirs:
240 srcdir = os.path.join(walkroot, dir)
241 dstdir = srcdir.replace(state[1], state[2])
242 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
243 if os.path.islink(srcdir):
244 sharedfiles.append(dstdir)
245 continue
246 if not dstdir.endswith("/"):
247 dstdir = dstdir + "/"
248 shareddirs.append(dstdir)
249
250 # Check the file list for conflicts against files which already exist
251 whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
252 match = []
253 for f in sharedfiles:
254 if os.path.exists(f) and not os.path.islink(f):
255 f = os.path.normpath(f)
256 realmatch = True
257 for w in whitelist:
258 w = os.path.normpath(w)
259 if f.startswith(w):
260 realmatch = False
261 break
262 if realmatch:
263 match.append(f)
264 sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
265 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
266 if search_output:
267 match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
268 else:
269 match.append(" (not matched to any task)")
270 if match:
271 bb.error("The recipe %s is trying to install files into a shared " \
272 "area when those files already exist. Those files and their manifest " \
273 "location are:\n %s\nPlease verify which recipe should provide the " \
274 "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
275 "break things - if not now, possibly in the future (we've seen builds fail " \
276 "several months later). If the system knew how to recover from this " \
277 "automatically it would, however there are several different scenarios " \
278 "which can result in this and we don't know which one this is. It may be " \
279 "you have switched providers of something like virtual/kernel (e.g. from " \
280 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
281 "clean task for both recipes and it will resolve this error. It may be " \
282 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
283 "those recipes should again resolve this error, however switching " \
284 "DISTRO_FEATURES on an existing build directory is not supported - you " \
285 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
286 "It could be the overlapping files detected are harmless in which case " \
287 "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
288 "also be your build is including two different conflicting versions of " \
289            "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
290 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
291 "sharing the error and filelist above." % \
292 (d.getVar('PN'), "\n ".join(match)))
293 bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
294
295 if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
296 sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
297 sharedfiles.append(ss['fixmedir'] + "/fixmepath")
298
299 # Write out the manifest
300 f = open(manifest, "w")
301 for file in sharedfiles:
302 f.write(file + "\n")
303
304 # We want to ensure that directories appear at the end of the manifest
305 # so that when we test to see if they should be deleted any contents
306 # added by the task will have been removed first.
307 dirs = sorted(shareddirs, key=len)
308 # Must remove children first, which will have a longer path than the parent
309 for di in reversed(dirs):
310 f.write(di + "\n")
311 f.close()
312
313 # Append to the list of manifests for this PACKAGE_ARCH
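    # Each line in the index file is "<stamp> <manifest prefix> <workdir>"; it is
    # read back by sstate_eventhandler2() to prune manifests for unreachable recipes.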
314
315 i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
316 l = bb.utils.lockfile(i + ".lock")
317 filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
318 manifests = []
319 if os.path.exists(i):
320 with open(i, "r") as f:
321 manifests = f.readlines()
322 if filedata not in manifests:
323 with open(i, "a+") as f:
324 f.write(filedata)
325 bb.utils.unlockfile(l)
326
327 # Run the actual file install
328 for state in ss['dirs']:
329 if os.path.exists(state[1]):
330 oe.path.copyhardlinktree(state[1], state[2])
331
332 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
333 # All hooks should run in the SSTATE_INSTDIR
334 bb.build.exec_func(postinst, d, (sstateinst,))
335
336 for lock in locks:
337 bb.utils.unlockfile(lock)
338
339sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST SSTATE_MANMACH SSTATE_MANFILEPREFIX"
340sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
341
342def sstate_installpkg(ss, d):
343 from oe.gpg_sign import get_signer
344
345 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
346 d.setVar("SSTATE_CURRTASK", ss['task'])
347 sstatefetch = d.getVar('SSTATE_PKGNAME')
348 sstatepkg = d.getVar('SSTATE_PKG')
349
350 if not os.path.exists(sstatepkg):
351 pstaging_fetch(sstatefetch, d)
352
353 if not os.path.isfile(sstatepkg):
354 bb.note("Sstate package %s does not exist" % sstatepkg)
355 return False
356
357 sstate_clean(ss, d)
358
359 d.setVar('SSTATE_INSTDIR', sstateinst)
360
361 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
362 if not os.path.isfile(sstatepkg + '.sig'):
363 bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
364 return False
365 signer = get_signer(d, 'local')
366 if not signer.verify(sstatepkg + '.sig'):
367 bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
368 return False
369
370    # Empty the sstateinst directory, ensure it's clean
371 if os.path.exists(sstateinst):
372 oe.path.remove(sstateinst)
373 bb.utils.mkdirhier(sstateinst)
374
375 sstateinst = d.getVar("SSTATE_INSTDIR")
376 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
377
378 for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
379 # All hooks should run in the SSTATE_INSTDIR
380 bb.build.exec_func(f, d, (sstateinst,))
381
382 return sstate_installpkgdir(ss, d)
383
384def sstate_installpkgdir(ss, d):
385 import oe.path
386 import subprocess
387
388 sstateinst = d.getVar("SSTATE_INSTDIR")
389 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
390
391 for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
392 # All hooks should run in the SSTATE_INSTDIR
393 bb.build.exec_func(f, d, (sstateinst,))
394
395 def prepdir(dir):
396 # remove dir if it exists, ensure any parent directories do exist
397 if os.path.exists(dir):
398 oe.path.remove(dir)
399 bb.utils.mkdirhier(dir)
400 oe.path.remove(dir)
401
402 for state in ss['dirs']:
403 prepdir(state[1])
404 os.rename(sstateinst + state[0], state[1])
405 sstate_install(ss, d)
406
407 for plain in ss['plaindirs']:
408 workdir = d.getVar('WORKDIR')
409 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
410 src = sstateinst + "/" + plain.replace(workdir, '')
411 if sharedworkdir in plain:
412 src = sstateinst + "/" + plain.replace(sharedworkdir, '')
413 dest = plain
414 bb.utils.mkdirhier(src)
415 prepdir(dest)
416 os.rename(src, dest)
417
418 return True
419
420python sstate_hardcode_path_unpack () {
421 # Fixup hardcoded paths
422 #
423 # Note: The logic below must match the reverse logic in
424 # sstate_hardcode_path(d)
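    #
    # The fixmepath file lists the unpacked files (relative to SSTATE_INSTDIR) which
    # still contain FIXMESTAGINGDIR placeholders and so need their paths rewritten.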
425 import subprocess
426
427 sstateinst = d.getVar('SSTATE_INSTDIR')
428 sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
429 fixmefn = sstateinst + "fixmepath"
430 if os.path.isfile(fixmefn):
431 staging_target = d.getVar('RECIPE_SYSROOT')
432 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
433
434 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
435 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
436 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
437 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
438 else:
439 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
440
441 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
442 for fixmevar in extra_staging_fixmes.split():
443 fixme_path = d.getVar(fixmevar)
444 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
445
446 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
447 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
448
449 # Defer do_populate_sysroot relocation command
450 if sstatefixmedir:
451 bb.utils.mkdirhier(sstatefixmedir)
452 with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
453 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
454 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
455 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
456 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
457 f.write(sstate_hardcode_cmd)
458 bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
459 return
460
461 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
462 subprocess.check_call(sstate_hardcode_cmd, shell=True)
463
464 # Need to remove this or we'd copy it into the target directory and may
465 # conflict with another writer
466 os.remove(fixmefn)
467}
468
469def sstate_clean_cachefile(ss, d):
470 import oe.path
471
472 if d.getVarFlag('do_%s' % ss['task'], 'task'):
473 d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
474 sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
475 bb.note("Removing %s" % sstatepkgfile)
476 oe.path.remove(sstatepkgfile)
477
478def sstate_clean_cachefiles(d):
479 for task in (d.getVar('SSTATETASKS') or "").split():
480 ld = d.createCopy()
481 ss = sstate_state_fromvars(ld, task)
482 sstate_clean_cachefile(ss, ld)
483
484def sstate_clean_manifest(manifest, d, prefix=None):
485 import oe.path
486
487 mfile = open(manifest)
488 entries = mfile.readlines()
489 mfile.close()
490
491 for entry in entries:
492 entry = entry.strip()
493 if prefix and not entry.startswith("/"):
494 entry = prefix + "/" + entry
495 bb.debug(2, "Removing manifest: %s" % entry)
496 # We can race against another package populating directories as we're removing them
497 # so we ignore errors here.
498 try:
499 if entry.endswith("/"):
500 if os.path.islink(entry[:-1]):
501 os.remove(entry[:-1])
502 elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
503 os.rmdir(entry[:-1])
504 else:
505 os.remove(entry)
506 except OSError:
507 pass
508
509 postrm = manifest + ".postrm"
510 if os.path.exists(manifest + ".postrm"):
511 import subprocess
512 os.chmod(postrm, 0o755)
513 subprocess.check_call(postrm, shell=True)
514 oe.path.remove(postrm)
515
516 oe.path.remove(manifest)
517
518def sstate_clean(ss, d):
519 import oe.path
520 import glob
521
522 d2 = d.createCopy()
523 stamp_clean = d.getVar("STAMPCLEAN")
524 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
525 if extrainf:
526 d2.setVar("SSTATE_MANMACH", extrainf)
527 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
528 else:
529 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
530
531 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
532
533 if os.path.exists(manifest):
534 locks = []
535 for lock in ss['lockfiles-shared']:
536 locks.append(bb.utils.lockfile(lock))
537 for lock in ss['lockfiles']:
538 locks.append(bb.utils.lockfile(lock))
539
540 sstate_clean_manifest(manifest, d)
541
542 for lock in locks:
543 bb.utils.unlockfile(lock)
544
545 # Remove the current and previous stamps, but keep the sigdata.
546 #
547 # The glob() matches do_task* which may match multiple tasks, for
548 # example: do_package and do_package_write_ipk, so we need to
549 # exactly match *.do_task.* and *.do_task_setscene.*
550 rm_stamp = '.do_%s.' % ss['task']
551 rm_setscene = '.do_%s_setscene.' % ss['task']
552 # For BB_SIGNATURE_HANDLER = "noop"
553 rm_nohash = ".do_%s" % ss['task']
554 for stfile in glob.glob(wildcard_stfile):
555 # Keep the sigdata
556 if ".sigdata." in stfile or ".sigbasedata." in stfile:
557 continue
558 # Preserve taint files in the stamps directory
559 if stfile.endswith('.taint'):
560 continue
561 if rm_stamp in stfile or rm_setscene in stfile or \
562 stfile.endswith(rm_nohash):
563 oe.path.remove(stfile)
564
565sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
566
567CLEANFUNCS += "sstate_cleanall"
568
569python sstate_cleanall() {
570 bb.note("Removing shared state for package %s" % d.getVar('PN'))
571
572 manifest_dir = d.getVar('SSTATE_MANIFESTS')
573 if not os.path.exists(manifest_dir):
574 return
575
576 tasks = d.getVar('SSTATETASKS').split()
577 for name in tasks:
578 ld = d.createCopy()
579 shared_state = sstate_state_fromvars(ld, name)
580 sstate_clean(shared_state, ld)
581}
582
583python sstate_hardcode_path () {
584 import subprocess, platform
585
586 # Need to remove hardcoded paths and fix these when we install the
587 # staging packages.
588 #
589 # Note: the logic in this function needs to match the reverse logic
590 # in sstate_installpkg(ss, d)
591
592 staging_target = d.getVar('RECIPE_SYSROOT')
593 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
594 sstate_builddir = d.getVar('SSTATE_BUILDDIR')
595
596 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
597 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
598 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
599 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
600 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
601 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
602 else:
603 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
604 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
605
606 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
607 for fixmevar in extra_staging_fixmes.split():
608 fixme_path = d.getVar(fixmevar)
609 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
610 sstate_grep_cmd += " -e '%s'" % (fixme_path)
611
612 fixmefn = sstate_builddir + "fixmepath"
613
614 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
615 sstate_filelist_cmd = "tee %s" % (fixmefn)
616
617 # fixmepath file needs relative paths, drop sstate_builddir prefix
618 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
619
620 xargs_no_empty_run_cmd = '--no-run-if-empty'
621 if platform.system() == 'Darwin':
622 xargs_no_empty_run_cmd = ''
623
624 # Limit the fixpaths and sed operations based on the initial grep search
625 # This has the side effect of making sure the vfs cache is hot
626 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
627
628 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
629 subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
630
631 # If the fixmefn is empty, remove it..
632 if os.stat(fixmefn).st_size == 0:
633 os.remove(fixmefn)
634 else:
635 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
636 subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
637}
638
639def sstate_package(ss, d):
640 import oe.path
641
642 tmpdir = d.getVar('TMPDIR')
643
644 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
645 d.setVar("SSTATE_CURRTASK", ss['task'])
646 bb.utils.remove(sstatebuild, recurse=True)
647 bb.utils.mkdirhier(sstatebuild)
648 for state in ss['dirs']:
649 if not os.path.exists(state[1]):
650 continue
651 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
652        # Find and error on absolute symlinks. We could attempt to relocate them, but it's not
653        # clear what the symlink is relative to in this context. We could add that markup
654        # to sstate tasks but there aren't many of these, so it's better to just avoid them entirely.
655 for walkroot, dirs, files in os.walk(state[1]):
656 for file in files + dirs:
657 srcpath = os.path.join(walkroot, file)
658 if not os.path.islink(srcpath):
659 continue
660 link = os.readlink(srcpath)
661 if not os.path.isabs(link):
662 continue
663 if not link.startswith(tmpdir):
664 continue
665 bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
666 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
667 os.rename(state[1], sstatebuild + state[0])
668
669 workdir = d.getVar('WORKDIR')
670 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
671 for plain in ss['plaindirs']:
672 pdir = plain.replace(workdir, sstatebuild)
673 if sharedworkdir in plain:
674 pdir = plain.replace(sharedworkdir, sstatebuild)
675 bb.utils.mkdirhier(plain)
676 bb.utils.mkdirhier(pdir)
677 os.rename(plain, pdir)
678
679 d.setVar('SSTATE_BUILDDIR', sstatebuild)
680 d.setVar('SSTATE_INSTDIR', sstatebuild)
681
682 if d.getVar('SSTATE_SKIP_CREATION') == '1':
683 return
684
685 sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
686 if d.getVar('SSTATE_SIG_KEY'):
687 sstate_create_package.append('sstate_sign_package')
688
689 for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
690 sstate_create_package + \
691 (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
692 # All hooks should run in SSTATE_BUILDDIR.
693 bb.build.exec_func(f, d, (sstatebuild,))
694
695 # SSTATE_PKG may have been changed by sstate_report_unihash
696 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
697 if not os.path.exists(siginfo):
698 bb.siggen.dump_this_task(siginfo, d)
699 else:
700 try:
701 os.utime(siginfo, None)
702 except PermissionError:
703 pass
704
705 return
706
707def pstaging_fetch(sstatefetch, d):
708 import bb.fetch2
709
710    # Only try to fetch if the user has configured a mirror
711 mirrors = d.getVar('SSTATE_MIRRORS')
712 if not mirrors:
713 return
714
715 # Copy the data object and override DL_DIR and SRC_URI
716 localdata = bb.data.createCopy(d)
717
718 dldir = localdata.expand("${SSTATE_DIR}")
719 bb.utils.mkdirhier(dldir)
720
721 localdata.delVar('MIRRORS')
722 localdata.setVar('FILESPATH', dldir)
723 localdata.setVar('DL_DIR', dldir)
724 localdata.setVar('PREMIRRORS', mirrors)
725
726 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
727 # we'll want to allow network access for the current set of fetches.
728 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
729 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
730 localdata.delVar('BB_NO_NETWORK')
731
732    # Try a fetch from the sstate mirror; if it fails, just return and
733    # we will build the package
734 uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
735 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
736 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
737 uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
738
739 for srcuri in uris:
740 localdata.setVar('SRC_URI', srcuri)
741 try:
742 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
743 fetcher.checkstatus()
744 fetcher.download()
745
746 except bb.fetch2.BBFetchException:
747 pass
748
749def sstate_setscene(d):
750 shared_state = sstate_state_fromvars(d)
751 accelerate = sstate_installpkg(shared_state, d)
752 if not accelerate:
753 bb.fatal("No suitable staging package found")
754
755python sstate_task_prefunc () {
756 shared_state = sstate_state_fromvars(d)
757 sstate_clean(shared_state, d)
758}
759sstate_task_prefunc[dirs] = "${WORKDIR}"
760
761python sstate_task_postfunc () {
762 shared_state = sstate_state_fromvars(d)
763
764 for intercept in shared_state['interceptfuncs']:
765 bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
766
767 omask = os.umask(0o002)
768 if omask != 0o002:
769 bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
770 sstate_package(shared_state, d)
771 os.umask(omask)
772
773 sstateinst = d.getVar("SSTATE_INSTDIR")
774 d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
775
776 sstate_installpkgdir(shared_state, d)
777
778 bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
779}
780sstate_task_postfunc[dirs] = "${WORKDIR}"
781
782
783#
784# Shell function to generate a sstate package from a directory
785# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
786#
787sstate_create_package () {
788 # Exit early if it already exists
789 if [ -e ${SSTATE_PKG} ]; then
790 [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
791 return
792 fi
793
794 mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
795 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
796
797 # Use pigz if available
798 OPT="-czS"
799 if [ -x "$(command -v pigz)" ]; then
800 OPT="-I pigz -cS"
801 fi
802
803 # Need to handle empty directories
804 if [ "$(ls -A)" ]; then
805 set +e
806 tar $OPT -f $TFILE *
807 ret=$?
808 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
809 exit 1
810 fi
811 set -e
812 else
813 tar $OPT --file=$TFILE --files-from=/dev/null
814 fi
815 chmod 0664 $TFILE
816 # Skip if it was already created by some other process
817 if [ ! -e ${SSTATE_PKG} ]; then
818 # Move into place using ln to attempt an atomic op.
819 # Abort if it already exists
820 ln $TFILE ${SSTATE_PKG} && rm $TFILE
821 else
822 rm $TFILE
823 fi
824 [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
825}
826
827python sstate_sign_package () {
828 from oe.gpg_sign import get_signer
829
830
831 signer = get_signer(d, 'local')
832 sstate_pkg = d.getVar('SSTATE_PKG')
833 if os.path.exists(sstate_pkg + '.sig'):
834 os.unlink(sstate_pkg + '.sig')
835 signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
836 d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
837}
838
839python sstate_report_unihash() {
840 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
841
842 if report_unihash:
843 ss = sstate_state_fromvars(d)
844 report_unihash(os.getcwd(), ss['task'], d)
845}
846
847#
848# Shell function to decompress and prepare a package for installation
849# Will be run from within SSTATE_INSTDIR.
850#
851sstate_unpack_package () {
852 tar -xvzf ${SSTATE_PKG}
853 # update .siginfo atime on local/NFS mirror
854 [ -O ${SSTATE_PKG}.siginfo ] && [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
855 # Use "! -w ||" to return true for read only files
856 [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
857 [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
858 [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
859}
860
861BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
862
863def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
864 found = set()
865 foundLocal = set()
866 foundNet = set()
867 missed = set()
868
869 def gethash(task):
870 return sq_data['unihash'][task]
871
872 def getpathcomponents(task, d):
873 # Magic data from BB_HASHFILENAME
874 splithashfn = sq_data['hashfn'][task].split(" ")
875 spec = splithashfn[1]
876 if splithashfn[0] == "True":
877 extrapath = d.getVar("NATIVELSBSTRING") + "/"
878 else:
879 extrapath = ""
880
881 tname = bb.runqueue.taskname_from_tid(task)[3:]
882
883 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
884 spec = splithashfn[2]
885 extrapath = ""
886
887 return spec, extrapath, tname
888
889
890 for tid in sq_data['hash']:
891
892 spec, extrapath, tname = getpathcomponents(tid, d)
893
894 sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
895
896 if os.path.exists(sstatefile):
897 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
898 found.add(tid)
899 foundLocal.add(tid)
900 continue
901 else:
902 missed.add(tid)
903 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
904
905 mirrors = d.getVar("SSTATE_MIRRORS")
906 if mirrors:
907 # Copy the data object and override DL_DIR and SRC_URI
908 localdata = bb.data.createCopy(d)
909
910 dldir = localdata.expand("${SSTATE_DIR}")
911 localdata.delVar('MIRRORS')
912 localdata.setVar('FILESPATH', dldir)
913 localdata.setVar('DL_DIR', dldir)
914 localdata.setVar('PREMIRRORS', mirrors)
915
916 bb.debug(2, "SState using premirror of: %s" % mirrors)
917
918 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
919 # we'll want to allow network access for the current set of fetches.
920 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
921 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
922 localdata.delVar('BB_NO_NETWORK')
923
924 from bb.fetch2 import FetchConnectionCache
925 def checkstatus_init(thread_worker):
926 thread_worker.connection_cache = FetchConnectionCache()
927
928 def checkstatus_end(thread_worker):
929 thread_worker.connection_cache.close_connections()
930
931 def checkstatus(thread_worker, arg):
932 (tid, sstatefile) = arg
933
934 localdata2 = bb.data.createCopy(localdata)
935 srcuri = "file://" + sstatefile
936            localdata2.setVar('SRC_URI', srcuri)
937 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
938
939 try:
940 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
941 connection_cache=thread_worker.connection_cache)
942 fetcher.checkstatus()
943 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
944 found.add(tid)
945 foundNet.add(tid)
946 if tid in missed:
947 missed.remove(tid)
948 except:
949 missed.add(tid)
950 bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
951 pass
952 if len(tasklist) >= min_tasks:
953 bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
954
955 tasklist = []
956 min_tasks = 100
957 for tid in sq_data['hash']:
958 if tid in found:
959 continue
960 spec, extrapath, tname = getpathcomponents(tid, d)
961 sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
962 tasklist.append((tid, sstatefile))
963
964 if tasklist:
965 if len(tasklist) >= min_tasks:
966 msg = "Checking sstate mirror object availability"
967 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
968
969 import multiprocessing
970 nproc = min(multiprocessing.cpu_count(), len(tasklist))
971
972 bb.event.enable_threadlock()
973 pool = oe.utils.ThreadedPool(nproc, len(tasklist),
974 worker_init=checkstatus_init, worker_end=checkstatus_end)
975 for t in tasklist:
976 pool.add_task(checkstatus, t)
977 pool.start()
978 pool.wait_completion()
979 bb.event.disable_threadlock()
980
981 if len(tasklist) >= min_tasks:
982 bb.event.fire(bb.event.ProcessFinished(msg), d)
983
984 inheritlist = d.getVar("INHERIT")
985 if "toaster" in inheritlist:
986 evdata = {'missed': [], 'found': []};
987 for tid in missed:
988 spec, extrapath, tname = getpathcomponents(tid, d)
989 sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
990 evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
991 for tid in found:
992 spec, extrapath, tname = getpathcomponents(tid, d)
993 sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
994 evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
995 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
996
997 if summary:
998 # Print some summary statistics about the current task completion and how much sstate
999 # reuse there was. Avoid divide by zero errors.
1000 total = len(sq_data['hash'])
1001 complete = 0
1002 if currentcount:
1003 complete = (len(found) + currentcount) / (total + currentcount) * 100
1004 match = 0
1005 if total:
1006 match = len(found) / total * 100
1007 bb.plain("Sstate summary: Wanted %d Local %d Network %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(foundLocal), len(foundNet),len(missed), currentcount, match, complete))
1008
1009 if hasattr(bb.parse.siggen, "checkhashes"):
1010 bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1011
1012 return found
1013
1014BB_SETSCENE_DEPVALID = "setscene_depvalid"
1015
1016def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1017 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
1018 # task is included in taskdependees too
1019 # Return - False - We need this dependency
1020 # - True - We can skip this dependency
1021 import re
1022
1023 def logit(msg, log):
1024 if log is not None:
1025 log.append(msg)
1026 else:
1027 bb.debug(2, msg)
1028
1029 logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1030
1031 def isNativeCross(x):
1032 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1033
1034 # We only need to trigger populate_lic through direct dependencies
1035 if taskdependees[task][1] == "do_populate_lic":
1036 return True
1037
1038 # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
1039 if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
1040 return True
1041
1042 # We only need to trigger packagedata through direct dependencies
1043 # but need to preserve packagedata on packagedata links
1044 if taskdependees[task][1] == "do_packagedata":
1045 for dep in taskdependees:
1046 if taskdependees[dep][1] == "do_packagedata":
1047 return False
1048 return True
1049
1050 for dep in taskdependees:
1051 logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
1052 if task == dep:
1053 continue
1054 if dep in notneeded:
1055 continue
1056        # do_package_write_* and do_package don't need do_package
1057 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1058 continue
1059 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1060 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1061 return False
1062 # do_package/packagedata/package_qa don't need do_populate_sysroot
1063 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
1064 continue
1065 # Native/Cross packages don't exist and are noexec anyway
1066 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1067 continue
1068
1069 # This is due to the [depends] in useradd.bbclass complicating matters
1070 # The logic *is* reversed here due to the way hard setscene dependencies are injected
1071 if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
1072 continue
1073
1074 # Consider sysroot depending on sysroot tasks
1075 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
1076            # Allow excluding certain recursive dependencies. If a recipe needs one, it should add a
1077            # specific dependency itself, rather than relying on one of its dependees to pull
1078            # them in.
1079 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
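            # Each SSTATE_EXCLUDEDEPS_SYSROOT entry is two '->'-separated regular
            # expressions matched against the two recipes' names (PN).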
1080 not_needed = False
1081 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1082 if excludedeps is None:
1083 # Cache the regular expressions for speed
1084 excludedeps = []
1085 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1086 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1087 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1088 for excl in excludedeps:
1089 if excl[0].match(taskdependees[dep][0]):
1090 if excl[1].match(taskdependees[task][0]):
1091 not_needed = True
1092 break
1093 if not_needed:
1094 continue
1095 # For meta-extsdk-toolchain we want all sysroot dependencies
1096 if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1097 return False
1098 # Native/Cross populate_sysroot need their dependencies
1099 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1100 return False
1101 # Target populate_sysroot depended on by cross tools need to be installed
1102 if isNativeCross(taskdependees[dep][0]):
1103 return False
1104 # Native/cross tools depended upon by target sysroot are not needed
1105 # Add an exception for shadow-native as required by useradd.bbclass
1106 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1107 continue
1108 # Target populate_sysroot need their dependencies
1109 return False
1110
1111 if taskdependees[task][1] == 'do_shared_workdir':
1112 continue
1113
1114 if taskdependees[dep][1] == "do_populate_lic":
1115 continue
1116
1117
1118 # Safe fallthrough default
1119 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1120 return False
1121 return True
1122
1123addhandler sstate_eventhandler
1124sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1125python sstate_eventhandler() {
1126 d = e.data
1127 writtensstate = d.getVar('SSTATE_CURRTASK')
1128 if not writtensstate:
1129 taskname = d.getVar("BB_RUNTASK")[3:]
1130 spec = d.getVar('SSTATE_PKGSPEC')
1131 swspec = d.getVar('SSTATE_SWSPEC')
1132 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1133 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1134 d.setVar("SSTATE_EXTRAPATH", "")
1135 d.setVar("SSTATE_CURRTASK", taskname)
1136 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1137 if not os.path.exists(siginfo):
1138 bb.siggen.dump_this_task(siginfo, d)
1139 else:
1140 try:
1141 os.utime(siginfo, None)
1142 except PermissionError:
1143 pass
1144
1145}
1146
1147SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1148
1149# Event handler which removes manifests and stamp files for
1150# recipes which are no longer reachable in a build where they
1151# once were.
1152# It also optionally removes the workdir of those tasks/recipes.
1153#
1154addhandler sstate_eventhandler2
1155sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
1156python sstate_eventhandler2() {
1157 import glob
1158 d = e.data
1159 stamps = e.stamps.values()
1160 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1161 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1162 preservestamps = []
1163 if os.path.exists(preservestampfile):
1164 with open(preservestampfile, 'r') as f:
1165 preservestamps = f.readlines()
1166 seen = []
1167
1168 # The machine index contains all the stamps this machine has ever seen in this build directory.
1169 # We should only remove things which this machine once accessed but no longer does.
1170 machineindex = set()
1171 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1172 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1173 if os.path.exists(mi):
1174 with open(mi, "r") as f:
1175 machineindex = set(line.strip() for line in f.readlines())
1176
1177 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1178 toremove = []
1179 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1180 if not os.path.exists(i):
1181 continue
1182 with open(i, "r") as f:
1183 lines = f.readlines()
1184 for l in lines:
1185 try:
1186 (stamp, manifest, workdir) = l.split()
1187 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1188 toremove.append(l)
1189 if stamp not in seen:
1190 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1191 seen.append(stamp)
1192 except ValueError:
1193 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1194
1195 if toremove:
1196 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1197 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1198
1199 removed = 0
1200 for r in toremove:
1201 (stamp, manifest, workdir) = r.split()
1202 for m in glob.glob(manifest + ".*"):
1203 if m.endswith(".postrm"):
1204 continue
1205 sstate_clean_manifest(m, d)
1206 bb.utils.remove(stamp + "*")
1207 if removeworkdir:
1208 bb.utils.remove(workdir, recurse = True)
1209 lines.remove(r)
1210 removed = removed + 1
1211 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1212
1213 bb.event.fire(bb.event.ProcessFinished(msg), d)
1214
1215 with open(i, "w") as f:
1216 for l in lines:
1217 f.write(l)
1218 machineindex |= set(stamps)
1219 with open(mi, "w") as f:
1220 for l in machineindex:
1221 f.write(l + "\n")
1222
1223 if preservestamps:
1224 os.remove(preservestampfile)
1225}
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
deleted file mode 100644
index 806a85773a..0000000000
--- a/meta/classes/staging.bbclass
+++ /dev/null
@@ -1,625 +0,0 @@
1# These directories will be staged in the sysroot
2SYSROOT_DIRS = " \
3 ${includedir} \
4 ${libdir} \
5 ${base_libdir} \
6 ${nonarch_base_libdir} \
7 ${datadir} \
8 /sysroot-only \
9"
10
11# These directories are also staged in the sysroot when they contain files that
12# are usable on the build system
13SYSROOT_DIRS_NATIVE = " \
14 ${bindir} \
15 ${sbindir} \
16 ${base_bindir} \
17 ${base_sbindir} \
18 ${libexecdir} \
19 ${sysconfdir} \
20 ${localstatedir} \
21"
22SYSROOT_DIRS_append_class-native = " ${SYSROOT_DIRS_NATIVE}"
23SYSROOT_DIRS_append_class-cross = " ${SYSROOT_DIRS_NATIVE}"
24SYSROOT_DIRS_append_class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
25
26# These directories will not be staged in the sysroot
27SYSROOT_DIRS_BLACKLIST = " \
28 ${mandir} \
29 ${docdir} \
30 ${infodir} \
31 ${datadir}/X11/locale \
32 ${datadir}/applications \
33 ${datadir}/bash-completion \
34 ${datadir}/fonts \
35 ${datadir}/gtk-doc/html \
36 ${datadir}/installed-tests \
37 ${datadir}/locale \
38 ${datadir}/pixmaps \
39 ${datadir}/terminfo \
40 ${libdir}/${BPN}/ptest \
41"
42
43sysroot_stage_dir() {
44 src="$1"
45 dest="$2"
46 # if the src doesn't exist don't do anything
47 if [ ! -d "$src" ]; then
48 return
49 fi
50
51 mkdir -p "$dest"
52 (
53 cd $src
54 find . -print0 | cpio --null -pdlu $dest
55 )
56}
57
58sysroot_stage_dirs() {
59 from="$1"
60 to="$2"
61
62 for dir in ${SYSROOT_DIRS}; do
63 sysroot_stage_dir "$from$dir" "$to$dir"
64 done
65
66 # Remove directories we do not care about
67 for dir in ${SYSROOT_DIRS_BLACKLIST}; do
68 rm -rf "$to$dir"
69 done
70}
71
72sysroot_stage_all() {
73 sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
74}
75
76python sysroot_strip () {
77 inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
78 if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
79 return
80
81 dstdir = d.getVar('SYSROOT_DESTDIR')
82 pn = d.getVar('PN')
83 libdir = d.getVar("libdir")
84 base_libdir = d.getVar("base_libdir")
85 qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
86 strip_cmd = d.getVar("STRIP")
87
88 oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
89 qa_already_stripped=qa_already_stripped)
90}
91
92do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
93
94addtask populate_sysroot after do_install
95
96SYSROOT_PREPROCESS_FUNCS ?= ""
97SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
98
99python do_populate_sysroot () {
100 # SYSROOT 'version' 2
101 bb.build.exec_func("sysroot_stage_all", d)
102 bb.build.exec_func("sysroot_strip", d)
103 for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
104 bb.build.exec_func(f, d)
105 pn = d.getVar("PN")
106 multiprov = d.getVar("MULTI_PROVIDER_WHITELIST").split()
107 provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
108 bb.utils.mkdirhier(provdir)
109 for p in d.getVar("PROVIDES").split():
110 if p in multiprov:
111 continue
112 p = p.replace("/", "_")
113 with open(provdir + p, "w") as f:
114 f.write(pn)
115}
116
117do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
118do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
119
120POPULATESYSROOTDEPS = ""
121POPULATESYSROOTDEPS_class-target = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
122POPULATESYSROOTDEPS_class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
123do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
124
125SSTATETASKS += "do_populate_sysroot"
126do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
127do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
128do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
129do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
130
131python do_populate_sysroot_setscene () {
132 sstate_setscene(d)
133}
134addtask do_populate_sysroot_setscene
135
136def staging_copyfile(c, target, dest, postinsts, seendirs):
137 import errno
138
139 destdir = os.path.dirname(dest)
140 if destdir not in seendirs:
141 bb.utils.mkdirhier(destdir)
142 seendirs.add(destdir)
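    # Remember staged postinst scripts so the caller can run them once the sysroot
    # has been populated (see staging_populate_sysroot_dir()).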
143 if "/usr/bin/postinst-" in c:
144 postinsts.append(dest)
145 if os.path.islink(c):
146 linkto = os.readlink(c)
147 if os.path.lexists(dest):
148 if not os.path.islink(dest):
149 raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
150 if os.readlink(dest) == linkto:
151 return dest
152 raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
153 os.symlink(linkto, dest)
154 #bb.warn(c)
155 else:
156 try:
157 os.link(c, dest)
158 except OSError as err:
159 if err.errno == errno.EXDEV:
160 bb.utils.copyfile(c, dest)
161 else:
162 raise
163 return dest
164
165def staging_copydir(c, target, dest, seendirs):
166 if dest not in seendirs:
167 bb.utils.mkdirhier(dest)
168 seendirs.add(dest)
169
170def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
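    # Rewrite the FIXMESTAGINGDIR placeholders in every file listed in the given
    # fixmepath manifests so they point at this recipe's target and native sysroots.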
171 import subprocess
172
173 if not fixme:
174 return
175 cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
176 for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
177 fixme_path = d.getVar(fixmevar)
178 cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
179 bb.debug(2, cmd)
180 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
181
182
183def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
184 import glob
185 import subprocess
186 import errno
187
188 fixme = []
189 postinsts = []
190 seendirs = set()
191 stagingdir = d.getVar("STAGING_DIR")
192 if native:
193 pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
194 targetdir = nativesysroot
195 else:
196 pkgarchs = ['${MACHINE_ARCH}']
197 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
198 pkgarchs.append('allarch')
199 targetdir = targetsysroot
200
201 bb.utils.mkdirhier(targetdir)
202 for pkgarch in pkgarchs:
203 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
204 if manifest.endswith("-initial.populate_sysroot"):
205 # skip libgcc-initial due to file overlap
206 continue
207 if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
208 continue
209 if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
210 continue
211 tmanifest = targetdir + "/" + os.path.basename(manifest)
212 if os.path.exists(tmanifest):
213 continue
214 try:
215 os.link(manifest, tmanifest)
216 except OSError as err:
217 if err.errno == errno.EXDEV:
218 bb.utils.copyfile(manifest, tmanifest)
219 else:
220 raise
221 with open(manifest, "r") as f:
222 for l in f:
223 l = l.strip()
224 if l.endswith("/fixmepath"):
225 fixme.append(l)
226 continue
227 if l.endswith("/fixmepath.cmd"):
228 continue
229 dest = l.replace(stagingdir, "")
230 dest = targetdir + "/" + "/".join(dest.split("/")[3:])
231 if l.endswith("/"):
232 staging_copydir(l, targetdir, dest, seendirs)
233 continue
234 try:
235 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
236 except FileExistsError:
237 continue
238
239 staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
240 for p in postinsts:
241 subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
242
243#
244# Manifests here are complicated. The main sysroot area has the unpacked sstate
245# which is unrelocated and tracked by the main sstate manifests. Each recipe
246# specific sysroot has manifests for each dependency that is installed there.
247# The task hash is used to tell whether the data needs to be reinstalled. We
248# use a symlink to point to the currently installed hash. There is also a
249# "complete" stamp file which is used to mark if installation completed. If
250# something fails (e.g. a postinst), this won't get written and we would
251# remove and reinstall the dependency. This also means partially installed
252# dependencies should get cleaned up correctly.
253#
254
255python extend_recipe_sysroot() {
256 import copy
257 import subprocess
258 import errno
259 import collections
260 import glob
261
262 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
263 mytaskname = d.getVar("BB_RUNTASK")
264 if mytaskname.endswith("_setscene"):
265 mytaskname = mytaskname.replace("_setscene", "")
266 workdir = d.getVar("WORKDIR")
267 #bb.warn(str(taskdepdata))
268 pn = d.getVar("PN")
269 stagingdir = d.getVar("STAGING_DIR")
270 sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
271 recipesysroot = d.getVar("RECIPE_SYSROOT")
272 recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
273
274 # Detect bitbake -b usage
275 nodeps = d.getVar("BB_LIMITEDDEPS") or False
276 if nodeps:
277 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
278 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
279 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
280 bb.utils.unlockfile(lock)
281 return
282
283 start = None
284 configuredeps = []
285 owntaskdeps = []
286 for dep in taskdepdata:
287 data = taskdepdata[dep]
288 if data[1] == mytaskname and data[0] == pn:
289 start = dep
290 elif data[0] == pn:
291 owntaskdeps.append(data[1])
292 if start is None:
293 bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
294
295 # We need to figure out which sysroot files we need to expose to this task.
296 # This needs to match what would get restored from sstate, which is controlled
297 # ultimately by calls from bitbake to setscene_depvalid().
298 # That function expects a setscene dependency tree. We build a dependency tree
299 # condensed to inter-sstate task dependencies, similar to that used by setscene
300 # tasks. We can then call into setscene_depvalid() and decide
301 # which dependencies we can "see" and should expose in the recipe specific sysroot.
302 setscenedeps = copy.deepcopy(taskdepdata)
303
304 start = set([start])
305
306 sstatetasks = d.getVar("SSTATETASKS").split()
307 # Add recipe specific tasks referenced by setscene_depvalid()
308 sstatetasks.append("do_stash_locale")
309
310 def print_dep_tree(deptree):
311 data = ""
312 for dep in deptree:
313 deps = " " + "\n ".join(deptree[dep][3]) + "\n"
314 data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
315 return data
316
317 #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
318
319 #bb.note(" start2 is %s" % str(start))
320
321 # If start is an sstate task (like do_package) we need to add in its direct dependencies
322 # else the code below won't recurse into them.
323 for dep in set(start):
324 for dep2 in setscenedeps[dep][3]:
325 start.add(dep2)
326 start.remove(dep)
327
328 #bb.note(" start3 is %s" % str(start))
329
330 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
331 for dep in taskdepdata:
332 data = setscenedeps[dep]
333 if data[1] not in sstatetasks:
334 for dep2 in setscenedeps:
335 data2 = setscenedeps[dep2]
336 if dep in data2[3]:
337 data2[3].update(setscenedeps[dep][3])
338 data2[3].remove(dep)
339 if dep in start:
340 start.update(setscenedeps[dep][3])
341 start.remove(dep)
342 del setscenedeps[dep]
343
344 # Remove circular references
345 for dep in setscenedeps:
346 if dep in setscenedeps[dep][3]:
347 setscenedeps[dep][3].remove(dep)
348
349 #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
350 #bb.note(" start is %s" % str(start))
351
352 # Direct dependencies should be present and can be depended upon
353 for dep in set(start):
354 if setscenedeps[dep][1] == "do_populate_sysroot":
355 if dep not in configuredeps:
356 configuredeps.append(dep)
357 bb.note("Direct dependencies are %s" % str(configuredeps))
358 #bb.note(" or %s" % str(start))
359
360 msgbuf = []
361 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
362 # for ones that would be restored from sstate.
363 done = list(start)
364 next = list(start)
365 while next:
366 new = []
367 for dep in next:
368 data = setscenedeps[dep]
369 for datadep in data[3]:
370 if datadep in done:
371 continue
372 taskdeps = {}
373 taskdeps[dep] = setscenedeps[dep][:2]
374 taskdeps[datadep] = setscenedeps[datadep][:2]
375 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
376 if retval:
377 msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
378 continue
379 done.append(datadep)
380 new.append(datadep)
381 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
382 configuredeps.append(datadep)
383 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
384 else:
385 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
386 next = new
387
388 # This logging is too verbose for day to day use sadly
389 #bb.debug(2, "\n".join(msgbuf))
390
391 depdir = recipesysrootnative + "/installeddeps"
392 bb.utils.mkdirhier(depdir)
393 bb.utils.mkdirhier(sharedmanifests)
394
395 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
396
397 fixme = {}
398 seendirs = set()
399 postinsts = []
400 multilibs = {}
401 manifests = {}
402 # All files that we're going to be installing, to find conflicts.
403 fileset = {}
404
405 for f in os.listdir(depdir):
406 if not f.endswith(".complete"):
407 continue
408 f = depdir + "/" + f
409 if os.path.islink(f) and not os.path.exists(f):
410 bb.note("%s no longer exists, removing from sysroot" % f)
411 lnk = os.readlink(f.replace(".complete", ""))
412 sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
413 os.unlink(f)
414 os.unlink(f.replace(".complete", ""))
415
416 installed = []
417 for dep in configuredeps:
418 c = setscenedeps[dep][0]
419 if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
420 bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
421 continue
422 installed.append(c)
423
424 # We want to remove anything which this task previously installed but is no longer a dependency
425 taskindex = depdir + "/" + "index." + mytaskname
426 if os.path.exists(taskindex):
427 potential = []
428 with open(taskindex, "r") as f:
429 for l in f:
430 l = l.strip()
431 if l not in installed:
432 fl = depdir + "/" + l
433 if not os.path.exists(fl):
434 # Was likely already uninstalled
435 continue
436 potential.append(l)
437 # We need to ensure no other task needs this dependency. We hold the sysroot
438 # lock so we can search the indexes to check
439 if potential:
440 for i in glob.glob(depdir + "/index.*"):
441 if i.endswith("." + mytaskname):
442 continue
443 with open(i, "r") as f:
444 for l in f:
445 if l.startswith("TaskDeps:"):
446 prevtasks = l.split()[1:]
447 if mytaskname in prevtasks:
448 # We're a dependency of this task so we can clear items out of the sysroot
449 break
450 l = l.strip()
451 if l in potential:
452 potential.remove(l)
453 for l in potential:
454 fl = depdir + "/" + l
455 bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
456 lnk = os.readlink(fl)
457 sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
458 os.unlink(fl)
459 os.unlink(fl + ".complete")
460
461 msg_exists = []
462 msg_adding = []
463
464 # Handle all removals first since files may move between recipes
465 for dep in configuredeps:
466 c = setscenedeps[dep][0]
467 if c not in installed:
468 continue
469 taskhash = setscenedeps[dep][5]
470 taskmanifest = depdir + "/" + c + "." + taskhash
471
472 if os.path.exists(depdir + "/" + c):
473 lnk = os.readlink(depdir + "/" + c)
474 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
475 continue
476 else:
477 bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
478 sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
479 os.unlink(depdir + "/" + c)
480 if os.path.lexists(depdir + "/" + c + ".complete"):
481 os.unlink(depdir + "/" + c + ".complete")
482 elif os.path.lexists(depdir + "/" + c):
483 os.unlink(depdir + "/" + c)
484
485 binfiles = {}
486 # Now handle installs
487 for dep in configuredeps:
488 c = setscenedeps[dep][0]
489 if c not in installed:
490 continue
491 taskhash = setscenedeps[dep][5]
492 taskmanifest = depdir + "/" + c + "." + taskhash
493
494 if os.path.exists(depdir + "/" + c):
495 lnk = os.readlink(depdir + "/" + c)
496 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
497 msg_exists.append(c)
498 continue
499
500 msg_adding.append(c)
501
502 os.symlink(c + "." + taskhash, depdir + "/" + c)
503
504 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
505 if d2 is not d:
506 # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
507 # We need a consistent WORKDIR for the image
508 d2.setVar("WORKDIR", d.getVar("WORKDIR"))
509 destsysroot = d2.getVar("RECIPE_SYSROOT")
510 # We put allarch recipes into the default sysroot
511 if manifest and "allarch" in manifest:
512 destsysroot = d.getVar("RECIPE_SYSROOT")
513
514 native = False
515 if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
516 native = True
517
518 if manifest:
519 newmanifest = collections.OrderedDict()
520 targetdir = destsysroot
521 if native:
522 targetdir = recipesysrootnative
523 if targetdir not in fixme:
524 fixme[targetdir] = []
525 fm = fixme[targetdir]
526
527 with open(manifest, "r") as f:
528 manifests[dep] = manifest
529 for l in f:
530 l = l.strip()
531 if l.endswith("/fixmepath"):
532 fm.append(l)
533 continue
534 if l.endswith("/fixmepath.cmd"):
535 continue
536 dest = l.replace(stagingdir, "")
537 dest = "/" + "/".join(dest.split("/")[3:])
538 newmanifest[l] = targetdir + dest
539
540 # Check if files have already been installed by another
541 # recipe and abort if they have, explaining what recipes are
542 # conflicting.
543 hashname = targetdir + dest
544 if not hashname.endswith("/"):
545 if hashname in fileset:
546 bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
547 else:
548 fileset[hashname] = c
549
550 # Having multiple identical manifests in each sysroot eats disk space so
551 # create a shared pool of them and hardlink if we can.
552 # We create the manifest in advance so that if something fails during installation,
553 # or the build is interrupted, subsequent execution can clean up.
554 sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
555 if not os.path.exists(sharedm):
556 smlock = bb.utils.lockfile(sharedm + ".lock")
557 # Can race here. You'd think it just means we may not end up with all copies hardlinked to each other
558 # but python can lose file handles so we need to do this under a lock.
559 if not os.path.exists(sharedm):
560 with open(sharedm, 'w') as m:
561 for l in newmanifest:
562 dest = newmanifest[l]
563 m.write(dest.replace(workdir + "/", "") + "\n")
564 bb.utils.unlockfile(smlock)
565 try:
566 os.link(sharedm, taskmanifest)
567 except OSError as err:
568 if err.errno == errno.EXDEV:
569 bb.utils.copyfile(sharedm, taskmanifest)
570 else:
571 raise
572 # Finally actually install the files
573 for l in newmanifest:
574 dest = newmanifest[l]
575 if l.endswith("/"):
576 staging_copydir(l, targetdir, dest, seendirs)
577 continue
578 if "/bin/" in l or "/sbin/" in l:
579 # defer /*bin/* files until last in case they need libs
580 binfiles[l] = (targetdir, dest)
581 else:
582 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
583
584 # Handle deferred binfiles
585 for l in binfiles:
586 (targetdir, dest) = binfiles[l]
587 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
588
589 bb.note("Installed into sysroot: %s" % str(msg_adding))
590 bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
591
592 for f in fixme:
593 staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)
594
595 for p in postinsts:
596 subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
597
598 for dep in manifests:
599 c = setscenedeps[dep][0]
600 os.symlink(manifests[dep], depdir + "/" + c + ".complete")
601
602 with open(taskindex, "w") as f:
603 f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
604 for l in sorted(installed):
605 f.write(l + "\n")
606
607 bb.utils.unlockfile(lock)
608}
609extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
610
611do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
612python do_prepare_recipe_sysroot () {
613 bb.build.exec_func("extend_recipe_sysroot", d)
614}
615addtask do_prepare_recipe_sysroot before do_configure after do_fetch
616
617python staging_taskhandler() {
618 bbtasks = e.tasklist
619 for task in bbtasks:
620 deps = d.getVarFlag(task, "depends")
621 if task == "do_configure" or (deps and "populate_sysroot" in deps):
622 d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
623}
624staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
625addhandler staging_taskhandler
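The bookkeeping extend_recipe_sysroot() relies on lives under ${RECIPE_SYSROOT_NATIVE}/installeddeps: one hardlinked manifest per dependency named <component>.<taskhash>, a <component> symlink pointing at the currently installed manifest, and a <component>.complete stamp written only once the postinsts have run. A minimal sketch of the staleness test this implies (illustrative only, not the class code; the component name is hypothetical):

    import os

    def dep_is_current(depdir, component, taskhash):
        # e.g. depdir = ".../recipe-sysroot-native/installeddeps", component = "zlib"
        link = os.path.join(depdir, component)
        if not os.path.islink(link):
            return False                      # never installed (or already cleaned out)
        if os.readlink(link) != component + "." + taskhash:
            return False                      # stale: task hash changed, needs reinstall
        # a missing stamp means a postinst failed or the build was interrupted,
        # so the dependency counts as partially installed and is redone
        return os.path.exists(link + ".complete")

Anything for which such a check fails is cleaned via sstate_clean_manifest() and reinstalled from the shared manifest pool.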
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
deleted file mode 100644
index 894f6b3718..0000000000
--- a/meta/classes/syslinux.bbclass
+++ /dev/null
@@ -1,194 +0,0 @@
1# syslinux.bbclass
2# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved
3# Released under the MIT license (see packages/COPYING)
4
5# Provide syslinux specific functions for building bootable images.
6
7# External variables
8# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
9# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
10# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
11# ${LABELS} - a list of targets for the automatic config
12# ${APPEND} - an override list of append strings for each label
13# ${SYSLINUX_OPTS} - additional options to add to the syslinux file, ';' delimited
14# ${SYSLINUX_SPLASH} - A background for the vga boot menu if using the boot menu
15# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
16# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
17# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
18# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
19
20do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
21 syslinux-native:do_populate_sysroot"
22
23ISOLINUXDIR ?= "/isolinux"
24SYSLINUXDIR = "/"
25# The kernel has an internal default console, which you can override with
26# a console=...some_tty...
27SYSLINUX_DEFAULT_CONSOLE ?= ""
28SYSLINUX_SERIAL ?= "0 115200"
29SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
30SYSLINUX_PROMPT ?= "0"
31SYSLINUX_TIMEOUT ?= "50"
32AUTO_SYSLINUXMENU ?= "1"
33SYSLINUX_ALLOWOPTIONS ?= "1"
34SYSLINUX_ROOT ?= "${ROOT}"
35SYSLINUX_CFG_VM ?= "${S}/syslinux_vm.cfg"
36SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg"
37APPEND ?= ""
38
39# Need UUID utility code.
40inherit fs-uuid
41
42syslinux_populate() {
43 DEST=$1
44 BOOTDIR=$2
45 CFGNAME=$3
46
47 install -d ${DEST}${BOOTDIR}
48
49 # Install the config files
50 install -m 0644 ${SYSLINUX_CFG} ${DEST}${BOOTDIR}/${CFGNAME}
51 if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
52 install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
53 install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
54 install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
55 if [ "${SYSLINUX_SPLASH}" != "" ] ; then
56 install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
57 fi
58 fi
59}
60
61syslinux_iso_populate() {
62 iso_dir=$1
63 syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
64 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
65 install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
66}
67
68syslinux_hddimg_populate() {
69 hdd_dir=$1
70 syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
71 install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
72}
73
74syslinux_hddimg_install() {
75 syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
76}
77
78python build_syslinux_cfg () {
79 import copy
80 import sys
81
82 workdir = d.getVar('WORKDIR')
83 if not workdir:
84 bb.error("WORKDIR not defined, unable to package")
85 return
86
87 labels = d.getVar('LABELS')
88 if not labels:
89 bb.debug(1, "LABELS not defined, nothing to do")
90 return
91
92 if labels == []:
93 bb.debug(1, "No labels, nothing to do")
94 return
95
96 cfile = d.getVar('SYSLINUX_CFG')
97 if not cfile:
98 bb.fatal('Unable to read SYSLINUX_CFG')
99
100 try:
101 cfgfile = open(cfile, 'w')
102 except OSError:
103 bb.fatal('Unable to open %s' % cfile)
104
105 cfgfile.write('# Automatically created by OE\n')
106
107 opts = d.getVar('SYSLINUX_OPTS')
108
109 if opts:
110 for opt in opts.split(';'):
111 cfgfile.write('%s\n' % opt)
112
113 allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS')
114 if allowoptions:
115 cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions)
116 else:
117 cfgfile.write('ALLOWOPTIONS 1\n')
118
119 syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE')
120 syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY')
121 syslinux_serial = d.getVar('SYSLINUX_SERIAL')
122 if syslinux_serial:
123 cfgfile.write('SERIAL %s\n' % syslinux_serial)
124
125 menu = (d.getVar('AUTO_SYSLINUXMENU') == "1")
126
127 if menu and syslinux_serial:
128 cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
129 else:
130 cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
131
132 timeout = d.getVar('SYSLINUX_TIMEOUT')
133
134 if timeout:
135 cfgfile.write('TIMEOUT %s\n' % timeout)
136 else:
137 cfgfile.write('TIMEOUT 50\n')
138
139 prompt = d.getVar('SYSLINUX_PROMPT')
140 if prompt:
141 cfgfile.write('PROMPT %s\n' % prompt)
142 else:
143 cfgfile.write('PROMPT 1\n')
144
145 if menu:
146 cfgfile.write('ui vesamenu.c32\n')
147 cfgfile.write('menu title Select kernel options and boot kernel\n')
148 cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
149 splash = d.getVar('SYSLINUX_SPLASH')
150 if splash:
151 cfgfile.write('menu background splash.lss\n')
152
153 for label in labels.split():
154 localdata = bb.data.createCopy(d)
155
156 overrides = localdata.getVar('OVERRIDES')
157 if not overrides:
158 bb.fatal('OVERRIDES not defined')
159
160 localdata.setVar('OVERRIDES', label + ':' + overrides)
161
162 btypes = [ [ "", syslinux_default_console ] ]
163 if menu and syslinux_serial:
164 btypes = [ [ "Graphics console ", syslinux_default_console ],
165 [ "Serial console ", syslinux_serial_tty ] ]
166
167 root= d.getVar('SYSLINUX_ROOT')
168 if not root:
169 bb.fatal('SYSLINUX_ROOT not defined')
170
171 kernel = localdata.getVar('KERNEL_IMAGETYPE')
172 for btype in btypes:
173 cfgfile.write('LABEL %s%s\nKERNEL /%s\n' % (btype[0], label, kernel))
174
175 exargs = d.getVar('SYSLINUX_KERNEL_ARGS')
176 if exargs:
177 btype[1] += " " + exargs
178
179 append = localdata.getVar('APPEND')
180 initrd = localdata.getVar('INITRD')
181
182 append = root + " " + append
183 cfgfile.write('APPEND ')
184
185 if initrd:
186 cfgfile.write('initrd=/initrd ')
187
188 cfgfile.write('LABEL=%s '% (label))
189 append = replace_rootfs_uuid(d, append)
190 cfgfile.write('%s %s\n' % (append, btype[1]))
191
192 cfgfile.close()
193}
194build_syslinux_cfg[dirs] = "${S}"
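For a concrete picture of what build_syslinux_cfg() writes, here is a hypothetical output assuming the defaults above plus LABELS = "boot", KERNEL_IMAGETYPE = "bzImage", SYSLINUX_ROOT = "root=/dev/sda2 rw" and an INITRD (values are illustrative; splash and extra options omitted):

    # Automatically created by OE
    ALLOWOPTIONS 1
    SERIAL 0 115200
    DEFAULT Graphics console boot
    TIMEOUT 50
    PROMPT 0
    ui vesamenu.c32
    menu title Select kernel options and boot kernel
    menu tabmsg Press [Tab] to edit, [Return] to select
    LABEL Graphics console boot
    KERNEL /bzImage
    APPEND initrd=/initrd LABEL=boot root=/dev/sda2 rw
    LABEL Serial console boot
    KERNEL /bzImage
    APPEND initrd=/initrd LABEL=boot root=/dev/sda2 rw console=ttyS0,115200

Because AUTO_SYSLINUXMENU defaults to "1" and a serial console is configured, each entry in LABELS is doubled into a graphics and a serial variant.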
diff --git a/meta/classes/systemd-boot-cfg.bbclass b/meta/classes/systemd-boot-cfg.bbclass
deleted file mode 100644
index b3e0e6ad41..0000000000
--- a/meta/classes/systemd-boot-cfg.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
1SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
2SYSTEMD_BOOT_ENTRIES ?= ""
3SYSTEMD_BOOT_TIMEOUT ?= "10"
4
5# Uses MACHINE specific KERNEL_IMAGETYPE
6PACKAGE_ARCH = "${MACHINE_ARCH}"
7
8# Need UUID utility code.
9inherit fs-uuid
10
11python build_efi_cfg() {
12 s = d.getVar("S")
13 labels = d.getVar('LABELS')
14 if not labels:
15 bb.debug(1, "LABELS not defined, nothing to do")
16 return
17
18 if labels == []:
19 bb.debug(1, "No labels, nothing to do")
20 return
21
22 cfile = d.getVar('SYSTEMD_BOOT_CFG')
23 cdir = os.path.dirname(cfile)
24 if not os.path.exists(cdir):
25 os.makedirs(cdir)
26 try:
27 cfgfile = open(cfile, 'w')
28 except OSError:
29 bb.fatal('Unable to open %s' % cfile)
30
31 cfgfile.write('# Automatically created by OE\n')
32 cfgfile.write('default %s\n' % (labels.split()[0]))
33 timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
34 if timeout:
35 cfgfile.write('timeout %s\n' % timeout)
36 else:
37 cfgfile.write('timeout 10\n')
38 cfgfile.close()
39
40 for label in labels.split():
41 localdata = d.createCopy()
42
43 entryfile = "%s/%s.conf" % (s, label)
44 if not os.path.exists(s):
45 os.makedirs(s)
46 d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
47 try:
48 entrycfg = open(entryfile, "w")
49 except OSError:
50 bb.fatal('Unable to open %s' % entryfile)
51
52 entrycfg.write('title %s\n' % label)
53
54 kernel = localdata.getVar("KERNEL_IMAGETYPE")
55 entrycfg.write('linux /%s\n' % kernel)
56
57 append = localdata.getVar('APPEND')
58 initrd = localdata.getVar('INITRD')
59
60 if initrd:
61 entrycfg.write('initrd /initrd\n')
62 lb = label
63 if label == "install":
64 lb = "install-efi"
65 entrycfg.write('options LABEL=%s ' % lb)
66 if append:
67 append = replace_rootfs_uuid(d, append)
68 entrycfg.write('%s' % append)
69 entrycfg.write('\n')
70 entrycfg.close()
71}
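Correspondingly, build_efi_cfg() produces one loader.conf plus a <label>.conf entry per label. With the same hypothetical LABELS = "boot", KERNEL_IMAGETYPE = "bzImage", an INITRD and APPEND = "root=/dev/sda2 rw", the generated files would look roughly like:

    # ${S}/loader.conf
    # Automatically created by OE
    default boot
    timeout 10

    # ${S}/boot.conf
    title boot
    linux /bzImage
    initrd /initrd
    options LABEL=boot root=/dev/sda2 rw

The only special case is the "install" label, whose options line is written with LABEL=install-efi.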
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
deleted file mode 100644
index 336c4c2ff5..0000000000
--- a/meta/classes/systemd-boot.bbclass
+++ /dev/null
@@ -1,35 +0,0 @@
1# Copyright (C) 2016 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# systemd-boot.bbclass - "systemd-boot" is essentially gummiboot merged into systemd.
6# The original standalone gummiboot project is dead and no longer
7# maintained.
8#
9# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
10# (images built by image-live.bbclass)
11
12do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
13
14require conf/image-uefi.conf
15# Need UUID utility code.
16inherit fs-uuid
17
18efi_populate() {
19 efi_populate_common "$1" systemd
20
21 # systemd-boot requires these paths for configuration files
22 # they are not customizable so no point in new vars
23 install -d ${DEST}/loader
24 install -d ${DEST}/loader/entries
25 install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
26 for i in ${SYSTEMD_BOOT_ENTRIES}; do
27 install -m 0644 ${i} ${DEST}/loader/entries
28 done
29}
30
31efi_iso_populate_append() {
32 cp -r $iso_dir/loader ${EFIIMGDIR}
33}
34
35inherit systemd-boot-cfg
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
deleted file mode 100644
index db5d109545..0000000000
--- a/meta/classes/systemd.bbclass
+++ /dev/null
@@ -1,233 +0,0 @@
1# The list of packages that should have systemd packaging scripts added. For
2# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
3# files in this package. If this variable isn't set, [package].service is used.
4SYSTEMD_PACKAGES ?= "${PN}"
5SYSTEMD_PACKAGES_class-native ?= ""
6SYSTEMD_PACKAGES_class-nativesdk ?= ""
7
8# Whether to enable or disable the services on installation.
9SYSTEMD_AUTO_ENABLE ??= "enable"
10
11# This class will be included in any recipe that supports systemd init scripts,
12# even if systemd is not in DISTRO_FEATURES. As such don't make any changes
13# directly but check the DISTRO_FEATURES first.
14python __anonymous() {
15 # If the distro features have systemd but not sysvinit, inhibit update-rcd
16 # from doing any work so that pure-systemd images don't have redundant init
17 # files.
18 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
19 d.appendVar("DEPENDS", " systemd-systemctl-native")
20 d.appendVar("PACKAGE_WRITE_DEPS", " systemd-systemctl-native")
21 if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
22 d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
23}
24
25systemd_postinst() {
26if systemctl >/dev/null 2>/dev/null; then
27 OPTS=""
28
29 if [ -n "$D" ]; then
30 OPTS="--root=$D"
31 fi
32
33 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
34 for service in ${SYSTEMD_SERVICE_ESCAPED}; do
35 systemctl ${OPTS} enable "$service"
36 done
37 fi
38
39 if [ -z "$D" ]; then
40 systemctl daemon-reload
41 systemctl preset ${SYSTEMD_SERVICE_ESCAPED}
42
43 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
44 systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
45 fi
46 fi
47fi
48}
49
50systemd_prerm() {
51if systemctl >/dev/null 2>/dev/null; then
52 if [ -z "$D" ]; then
53 systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
54
55 systemctl disable ${SYSTEMD_SERVICE_ESCAPED}
56 fi
57fi
58}
59
60
61systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
62systemd_populate_packages[vardepsexclude] += "OVERRIDES"
63
64
65python systemd_populate_packages() {
66 import re
67 import shlex
68
69 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
70 return
71
72 def get_package_var(d, var, pkg):
73 val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
74 if val == "":
75 val = (d.getVar(var) or "").strip()
76 return val
77
78 # Check if systemd-packages already included in PACKAGES
79 def systemd_check_package(pkg_systemd):
80 packages = d.getVar('PACKAGES')
81 if not pkg_systemd in packages.split():
82 bb.error('%s does not appear in package list, please add it' % pkg_systemd)
83
84
85 def systemd_generate_package_scripts(pkg):
86 bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
87
88 paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE_' + pkg).split())
89 d.setVar('SYSTEMD_SERVICE_ESCAPED_' + pkg, paths_escaped)
90
91 # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
92 # variable.
93 localdata = d.createCopy()
94 localdata.prependVar("OVERRIDES", pkg + ":")
95
96 postinst = d.getVar('pkg_postinst_%s' % pkg)
97 if not postinst:
98 postinst = '#!/bin/sh\n'
99 postinst += localdata.getVar('systemd_postinst')
100 d.setVar('pkg_postinst_%s' % pkg, postinst)
101
102 prerm = d.getVar('pkg_prerm_%s' % pkg)
103 if not prerm:
104 prerm = '#!/bin/sh\n'
105 prerm += localdata.getVar('systemd_prerm')
106 d.setVar('pkg_prerm_%s' % pkg, prerm)
107
108
109 # Add files to FILES_*-systemd if existent and not already done
110 def systemd_append_file(pkg_systemd, file_append):
111 appended = False
112 if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
113 var_name = "FILES_" + pkg_systemd
114 files = d.getVar(var_name, False) or ""
115 if file_append not in files.split():
116 d.appendVar(var_name, " " + file_append)
117 appended = True
118 return appended
119
120 # Add systemd files to FILES_*-systemd, parse for Also= and follow recursive
121 def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
122 # avoid infinite recursion
123 if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
124 fullpath = oe.path.join(d.getVar("D"), path, service)
125 if service.find('.service') != -1:
126 # for *.service add *@.service
127 service_base = service.replace('.service', '')
128 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
129 if service.find('.socket') != -1:
130 # for *.socket add *.service and *@.service
131 service_base = service.replace('.socket', '')
132 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
133 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
134 for key in keys.split():
135 # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
136 cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, shlex.quote(fullpath), key)
137 pipe = os.popen(cmd, 'r')
138 line = pipe.readline()
139 while line:
140 line = line.replace('\n', '')
141 systemd_add_files_and_parse(pkg_systemd, path, line, keys)
142 line = pipe.readline()
143 pipe.close()
144
145 # Check service-files and call systemd_add_files_and_parse for each entry
146 def systemd_check_services():
147 searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
148 searchpaths.append(d.getVar("systemd_system_unitdir"))
149 systemd_packages = d.getVar('SYSTEMD_PACKAGES')
150
151 keys = 'Also'
152 # scan for all in SYSTEMD_SERVICE[]
153 for pkg_systemd in systemd_packages.split():
154 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
155 path_found = ''
156
157 # Deal with adding, for example, 'ifplugd@eth0.service' from
158 # 'ifplugd@.service'
159 base = None
160 at = service.find('@')
161 if at != -1:
162 ext = service.rfind('.')
163 base = service[:at] + '@' + service[ext:]
164
165 for path in searchpaths:
166 if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
167 path_found = path
168 break
169 elif base is not None:
170 if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
171 path_found = path
172 break
173
174 if path_found != '':
175 systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
176 else:
177 bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE_{1}. {2}".format(
178 service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
179
180 def systemd_create_presets(pkg, action):
181 presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
182 bb.utils.mkdirhier(os.path.dirname(presetf))
183 with open(presetf, 'a') as fd:
184 for service in d.getVar('SYSTEMD_SERVICE_%s' % pkg).split():
185 fd.write("%s %s\n" % (action,service))
186 d.appendVar("FILES_%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
187
188 # Run all modifications once when creating package
189 if os.path.exists(d.getVar("D")):
190 for pkg in d.getVar('SYSTEMD_PACKAGES').split():
191 systemd_check_package(pkg)
192 if d.getVar('SYSTEMD_SERVICE_' + pkg):
193 systemd_generate_package_scripts(pkg)
194 action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
195 if action in ("enable", "disable"):
196 systemd_create_presets(pkg, action)
197 elif action not in ("mask", "preset"):
198 bb.fatal("SYSTEMD_AUTO_ENABLE_%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
199 systemd_check_services()
200}
201
202PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
203
204python rm_systemd_unitdir (){
205 import shutil
206 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
207 systemd_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_unitdir'))
208 if os.path.exists(systemd_unitdir):
209 shutil.rmtree(systemd_unitdir)
210 systemd_libdir = os.path.dirname(systemd_unitdir)
211 if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
212 os.rmdir(systemd_libdir)
213}
214
215python rm_sysvinit_initddir (){
216 import shutil
217 sysv_initddir = oe.path.join(d.getVar("D"), (d.getVar('INIT_D_DIR') or "/etc/init.d"))
218
219 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
220 not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
221 os.path.exists(sysv_initddir):
222 systemd_system_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_system_unitdir'))
223
224 # If systemd_system_unitdir contains anything, delete sysv_initddir
225 if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
226 shutil.rmtree(sysv_initddir)
227}
228
229do_install[postfuncs] += "${RMINITDIR} "
230RMINITDIR_class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
231RMINITDIR_class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
232RMINITDIR = ""
233
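A sketch of how a recipe typically consumes the class above (recipe and unit names are hypothetical):

    # foo_1.0.bb (hypothetical)
    inherit systemd
    SYSTEMD_PACKAGES = "${PN}"
    SYSTEMD_SERVICE_${PN} = "foo.service"
    SYSTEMD_AUTO_ENABLE = "enable"

With SYSTEMD_AUTO_ENABLE = "enable", systemd_populate_packages() appends the postinst/prerm shown above to the package, so the service gets enabled whether the package is installed into a rootfs under $D or onto a running system, and systemd_create_presets() ships ${systemd_unitdir}/system-preset/98-foo.preset containing the single line "enable foo.service".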
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
index 6059ae95e0..2dfc7db255 100644
--- a/meta/classes/terminal.bbclass
+++ b/meta/classes/terminal.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1 7 OE_TERMINAL ?= 'auto'
2 8 OE_TERMINAL[type] = 'choice'
3 9 OE_TERMINAL[choices] = 'auto none \
@@ -26,6 +32,9 @@ def emit_terminal_func(command, envdata, d):
26 32 bb.utils.mkdirhier(os.path.dirname(runfile))
27 33
28 34 with open(runfile, 'w') as script:
35 # Override the shell shell_trap_code specifies.
36 # If our shell is bash, we might well face silent death.
37 script.write("#!/bin/bash\n")
29 38 script.write(bb.build.shell_trap_code())
30 39 bb.data.emit_func(cmd_func, script, envdata)
31 40 script.write(cmd_func)
@@ -37,7 +46,7 @@ def emit_terminal_func(command, envdata, d):
37 46 def oe_terminal(command, title, d):
38 47 import oe.data
39 48 import oe.terminal
40 49
41 50 envdata = bb.data.init()
42 51
43 52 for v in os.environ:
diff --git a/meta/classes/testexport.bbclass b/meta/classes/testexport.bbclass
deleted file mode 100644
index 1b0fb44a4a..0000000000
--- a/meta/classes/testexport.bbclass
+++ /dev/null
@@ -1,182 +0,0 @@
1# Copyright (C) 2016 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4#
5#
6# testexport.bbclass allows executing runtime tests outside the OE environment.
7# Most of the tests are commands run on the target image over ssh.
8# To use it add testexport to global inherit and call your target image with -c testexport
9# You can try it out like this:
10# - First build an image, e.g. core-image-sato
11# - Add INHERIT += "testexport" in local.conf
12# - Then bitbake core-image-sato -c testexport. That will generate the directory structure
13# to execute the runtime tests using runexported.py.
14#
15# For more information on TEST_SUITES check testimage class.
16
17TEST_LOG_DIR ?= "${WORKDIR}/testexport"
18TEST_EXPORT_DIR ?= "${TMPDIR}/testexport/${PN}"
19TEST_EXPORT_PACKAGED_DIR ?= "packages/packaged"
20TEST_EXPORT_EXTRACTED_DIR ?= "packages/extracted"
21
22TEST_TARGET ?= "simpleremote"
23TEST_TARGET_IP ?= ""
24TEST_SERVER_IP ?= ""
25
26TEST_EXPORT_SDK_PACKAGES ?= ""
27TEST_EXPORT_SDK_ENABLED ?= "0"
28TEST_EXPORT_SDK_NAME ?= "testexport-tools-nativesdk"
29TEST_EXPORT_SDK_DIR ?= "sdk"
30
31TEST_EXPORT_DEPENDS = ""
32TEST_EXPORT_DEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
33TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}"
34TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock"
35
36addtask testexport
37do_testexport[nostamp] = "1"
38do_testexport[depends] += "${TEST_EXPORT_DEPENDS} ${TESTIMAGEDEPENDS}"
39do_testexport[lockfiles] += "${TEST_EXPORT_LOCK}"
40
41python do_testexport() {
42 testexport_main(d)
43}
44
45def testexport_main(d):
46 import json
47 import logging
48
49 from oeqa.runtime.context import OERuntimeTestContext
50 from oeqa.runtime.context import OERuntimeTestContextExecutor
51
52 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
53 d.getVar('IMAGE_LINK_NAME')))
54
55 tdname = "%s.testdata.json" % image_name
56 td = json.load(open(tdname, "r"))
57
58 logger = logging.getLogger("BitBake")
59
60 target = OERuntimeTestContextExecutor.getTarget(
61 d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
62 d.getVar("TEST_SERVER_IP"))
63
64 host_dumper = OERuntimeTestContextExecutor.getHostDumper(
65 d.getVar("testimage_dump_host"), d.getVar("TESTIMAGE_DUMP_DIR"))
66
67 image_manifest = "%s.manifest" % image_name
68 image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
69
70 extract_dir = d.getVar("TEST_EXTRACTED_DIR")
71
72 tc = OERuntimeTestContext(td, logger, target, host_dumper,
73 image_packages, extract_dir)
74
75 copy_needed_files(d, tc)
76
77def copy_needed_files(d, tc):
78 import shutil
79 import oe.path
80
81 from oeqa.utils.package_manager import _get_json_file
82 from oeqa.core.utils.test import getSuiteCasesFiles
83
84 export_path = d.getVar('TEST_EXPORT_DIR')
85 corebase_path = d.getVar('COREBASE')
86
87 # Clean everything before starting
88 oe.path.remove(export_path)
89 bb.utils.mkdirhier(os.path.join(export_path, 'lib', 'oeqa'))
90
91 # The source of files to copy are relative to 'COREBASE' directory
92 # The destination is relative to 'TEST_EXPORT_DIR'
93 # Because we are squashing the libraries, we need to remove
94 # the layer/script directory
95 files_to_copy = [ os.path.join('meta', 'lib', 'oeqa', 'core'),
96 os.path.join('meta', 'lib', 'oeqa', 'runtime'),
97 os.path.join('meta', 'lib', 'oeqa', 'files'),
98 os.path.join('meta', 'lib', 'oeqa', 'utils'),
99 os.path.join('scripts', 'oe-test'),
100 os.path.join('scripts', 'lib', 'argparse_oe.py'),
101 os.path.join('scripts', 'lib', 'scriptutils.py'), ]
102
103 for f in files_to_copy:
104 src = os.path.join(corebase_path, f)
105 dst = os.path.join(export_path, f.split('/', 1)[-1])
106 if os.path.isdir(src):
107 oe.path.copytree(src, dst)
108 else:
109 shutil.copy2(src, dst)
110
111 # Remove cases and just copy the ones specified
112 cases_path = os.path.join(export_path, 'lib', 'oeqa', 'runtime', 'cases')
113 oe.path.remove(cases_path)
114 bb.utils.mkdirhier(cases_path)
115 test_paths = get_runtime_paths(d)
116 test_modules = d.getVar('TEST_SUITES').split()
117 tc.loadTests(test_paths, modules=test_modules)
118 for f in getSuiteCasesFiles(tc.suites):
119 shutil.copy2(f, cases_path)
120 json_file = _get_json_file(f)
121 if json_file:
122 shutil.copy2(json_file, cases_path)
123
124 # Copy test data
125 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
126 d.getVar('IMAGE_LINK_NAME')))
127 image_manifest = "%s.manifest" % image_name
128 tdname = "%s.testdata.json" % image_name
129 test_data_path = os.path.join(export_path, 'data')
130 bb.utils.mkdirhier(test_data_path)
131 shutil.copy2(image_manifest, os.path.join(test_data_path, 'manifest'))
132 shutil.copy2(tdname, os.path.join(test_data_path, 'testdata.json'))
133
134 for subdir, dirs, files in os.walk(export_path):
135 for dir in dirs:
136 if dir == '__pycache__':
137 shutil.rmtree(os.path.join(subdir, dir))
138
139 # Create tar file for common parts of testexport
140 testexport_create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
141
142 # Copy packages needed for runtime testing
143 package_extraction(d, tc.suites)
144 test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR")
145 if os.path.isdir(test_pkg_dir) and os.listdir(test_pkg_dir):
146 export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
147 oe.path.copytree(test_pkg_dir, export_pkg_dir)
148 # Create tar file for packages needed by the DUT
149 testexport_create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
150
151 # Copy SDK
152 if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
153 sdk_deploy = d.getVar("SDK_DEPLOY")
154 tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
155 tarball_path = os.path.join(sdk_deploy, tarball_name)
156 export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"),
157 d.getVar("TEST_EXPORT_SDK_DIR"))
158 bb.utils.mkdirhier(export_sdk_dir)
159 shutil.copy2(tarball_path, export_sdk_dir)
160
161 # Create tar file for the sdk
162 testexport_create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
163
164 bb.plain("Exported tests to: %s" % export_path)
165
166def testexport_create_tarball(d, tar_name, src_dir):
167
168 import tarfile
169
170 tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR"), tar_name)
171 current_dir = os.getcwd()
172 src_dir = src_dir.rstrip('/')
173 dir_name = os.path.dirname(src_dir)
174 base_name = os.path.basename(src_dir)
175
176 os.chdir(dir_name)
177 tar = tarfile.open(tar_path, "w:gz")
178 tar.add(base_name)
179 tar.close()
180 os.chdir(current_dir)
181
182inherit testimage
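After `bitbake <image> -c testexport`, copy_needed_files() above leaves a self-contained tree under TEST_EXPORT_DIR (${TMPDIR}/testexport/${PN} by default) that can be copied off the build host; roughly (exact contents depend on TEST_SUITES and the SDK options):

    testexport/<image>/
        oe-test                      # from scripts/
        lib/argparse_oe.py
        lib/scriptutils.py
        lib/oeqa/{core,runtime,files,utils}/
        lib/oeqa/runtime/cases/      # only the cases needed by TEST_SUITES
        data/manifest
        data/testdata.json
        packages/                    # packages required by the tests, if any
        sdk/                         # only if TEST_EXPORT_SDK_ENABLED = "1"
        testexport.tar.gz            # plus per-machine/SDK tarballs when applicable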
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
deleted file mode 100644
index 78da4b09bd..0000000000
--- a/meta/classes/testimage.bbclass
+++ /dev/null
@@ -1,491 +0,0 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5inherit metadata_scm
6inherit image-artifact-names
7
8# testimage.bbclass enables testing of qemu images using python unittests.
9# Most of the tests are commands run on the target image over ssh.
10# To use it add testimage to global inherit and call your target image with -c testimage
11# You can try it out like this:
12# - first add IMAGE_CLASSES += "testimage" in local.conf
13# - build a qemu core-image-sato
14# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
15#
16# The tests can be run automatically each time an image is built if you set
17# TESTIMAGE_AUTO = "1"
18
19TESTIMAGE_AUTO ??= "0"
20
21# You can set (or append to) TEST_SUITES in local.conf to select the tests
22# which you want to run for your target.
23# The test names are the module names in meta/lib/oeqa/runtime/cases.
24# Each name in TEST_SUITES represents a required test for the image (no skipping allowed).
25# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
26# Note that order in TEST_SUITES is relevant: tests are run in an order such that
27# tests mentioned in @skipUnlessPassed run before the tests that depend on them,
28# but without such dependencies, tests run in the order in which they are listed
29# in TEST_SUITES.
30#
31# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
32
33# TEST_LOG_DIR contains a command ssh log and may contain information about what command is running, its output and return codes, and for qemu a boot log up to login.
34# Booting is handled by this class, and it's not a test in itself.
35# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
36# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
37# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
38# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
39
40# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting,
41# if a pattern is not specifically present on this variable a default will be used when booting the target.
42# TESTIMAGE_BOOT_PATTERNS[<flag>] overrides the pattern used for that specific flag, where flag comes from a list of accepted flags
43# e.g. normally the system boots and waits for a login prompt (login:), after that it sends the command "root\n" to log in as the root user
44# if we wanted to log in as the hypothetical "webserver" user for example we could set the following:
45# TESTIMAGE_BOOT_PATTERNS = "send_login_user search_login_succeeded"
46# TESTIMAGE_BOOT_PATTERNS[send_login_user] = "webserver\n"
47# TESTIMAGE_BOOT_PATTERNS[search_login_succeeded] = "webserver@[a-zA-Z0-9\-]+:~#"
48# The accepted flags are the following: search_reached_prompt, send_login_user, search_login_succeeded, search_cmd_finished.
49# They are prefixed with either search/send, to indicate whether the pattern is meant to be sent to or searched for in the target terminal
50
51TEST_LOG_DIR ?= "${WORKDIR}/testimage"
52
53TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
54TEST_INSTALL_TMP_DIR ?= "${WORKDIR}/testimage/install_tmp"
55TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages"
56TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
57TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
58
59BASICTESTSUITE = "\
60 ping date df ssh scp python perl gi ptest parselogs \
61 logrotate connman systemd oe_syslog pam stap ldd xorg \
62 kernelmodule gcc buildcpio buildlzip buildgalculator \
63 dnf rpm opkg apt weston"
64
65DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
66
67# aarch64 has no graphics
68DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
69# musl doesn't support systemtap
70DEFAULT_TEST_SUITES_remove_libc-musl = "stap"
71
72# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
73# mitigate this by removing build tests for qemumips machines.
74MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
75DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
76DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
77
78TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
79
80TEST_QEMUBOOT_TIMEOUT ?= "1000"
81TEST_OVERALL_TIMEOUT ?= ""
82TEST_TARGET ?= "qemu"
83TEST_QEMUPARAMS ?= ""
84TEST_RUNQEMUPARAMS ?= ""
85
86TESTIMAGE_BOOT_PATTERNS ?= ""
87
88TESTIMAGEDEPENDS = ""
89TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
90TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
91TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
92TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
93TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}"
94TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
95
96TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
97TESTIMAGELOCK_qemuall = ""
98
99TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
100
101TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
102
103testimage_dump_target () {
104 top -bn1
105 ps
106 free
107 df
108 # The next command will export the default gateway IP
109 export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
110 ping -c3 $DEFAULT_GATEWAY
111 dmesg
112 netstat -an
113 ip address
114 # The next command will dump logs from /var/log/
115 find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
116}
117
118testimage_dump_host () {
119 top -bn1
120 iostat -x -z -N -d -p ALL 20 2
121 ps -ef
122 free
123 df
124 memstat
125 dmesg
126 ip -s link
127 netstat -an
128}
129
130python do_testimage() {
131 testimage_main(d)
132}
133
134addtask testimage
135do_testimage[nostamp] = "1"
136do_testimage[depends] += "${TESTIMAGEDEPENDS}"
137do_testimage[lockfiles] += "${TESTIMAGELOCK}"
138
139def testimage_sanity(d):
140 if (d.getVar('TEST_TARGET') == 'simpleremote'
141 and (not d.getVar('TEST_TARGET_IP')
142 or not d.getVar('TEST_SERVER_IP'))):
143 bb.fatal('When TEST_TARGET is set to "simpleremote" '
144 'TEST_TARGET_IP and TEST_SERVER_IP are needed too.')
145
146def get_testimage_configuration(d, test_type, machine):
147 import platform
148 from oeqa.utils.metadata import get_layers
149 configuration = {'TEST_TYPE': test_type,
150 'MACHINE': machine,
151 'DISTRO': d.getVar("DISTRO"),
152 'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
153 'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
154 'STARTTIME': d.getVar("DATETIME"),
155 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
156 'LAYERS': get_layers(d.getVar("BBLAYERS"))}
157 return configuration
158get_testimage_configuration[vardepsexclude] = "DATETIME"
159
160def get_testimage_json_result_dir(d):
161 json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
162 custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
163 if custom_json_result_dir:
164 json_result_dir = custom_json_result_dir
165 return json_result_dir
166
167def get_testimage_result_id(configuration):
168 return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
169
170def get_testimage_boot_patterns(d):
171 from collections import defaultdict
172 boot_patterns = defaultdict(str)
173 # Only accept certain values
174 accepted_patterns = ['search_reached_prompt', 'send_login_user', 'search_login_succeeded', 'search_cmd_finished']
175 # Not all patterns need to be overridden, e.g. perhaps we only want to change the user
176 boot_patterns_flags = d.getVarFlags('TESTIMAGE_BOOT_PATTERNS') or {}
177 if boot_patterns_flags:
178 patterns_set = [p for p in boot_patterns_flags.items() if p[0] in d.getVar('TESTIMAGE_BOOT_PATTERNS').split()]
179 for flag, flagval in patterns_set:
180 if flag not in accepted_patterns:
181 bb.fatal('Testimage: The only accepted boot patterns are: search_reached_prompt,send_login_user, \
182 search_login_succeeded,search_cmd_finished\n Make sure your TESTIMAGE_BOOT_PATTERNS=%s \
183 contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS'))
184 return
185 # We know boot prompt is searched through in binary format, others might be expressions
186 if flag == 'search_reached_prompt':
187 boot_patterns[flag] = flagval.encode()
188 else:
189 boot_patterns[flag] = flagval.encode().decode('unicode-escape')
190 return boot_patterns
191
192
193def testimage_main(d):
194 import os
195 import json
196 import signal
197 import logging
198
199 from bb.utils import export_proxies
200 from oeqa.core.utils.misc import updateTestData
201 from oeqa.runtime.context import OERuntimeTestContext
202 from oeqa.runtime.context import OERuntimeTestContextExecutor
203 from oeqa.core.target.qemu import supported_fstypes
204 from oeqa.core.utils.test import getSuiteCases
205 from oeqa.utils import make_logger_bitbake_compatible
206
207 def sigterm_exception(signum, stackframe):
208 """
209 Catch SIGTERM from worker in order to stop qemu.
210 """
211 os.kill(os.getpid(), signal.SIGINT)
212
213 def handle_test_timeout(timeout):
214 bb.warn("Global test timeout reached (%s seconds), stopping the tests." %(timeout))
215 os.kill(os.getpid(), signal.SIGINT)
216
217 testimage_sanity(d)
218
219 if (d.getVar('IMAGE_PKGTYPE') == 'rpm'
220 and ('dnf' in d.getVar('TEST_SUITES') or 'auto' in d.getVar('TEST_SUITES'))):
221 create_rpm_index(d)
222
223 logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
224 pn = d.getVar("PN")
225
226 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR"))
227
228 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
229 d.getVar('IMAGE_LINK_NAME')))
230
231 tdname = "%s.testdata.json" % image_name
232 try:
233 td = json.load(open(tdname, "r"))
234 except (FileNotFoundError) as err:
235 bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
236
237 # Some variables need to be updated (mostly paths) with the
238 # ones of the current environment because some tests require them.
239 updateTestData(d, td, d.getVar('TESTIMAGE_UPDATE_VARS').split())
240
241 image_manifest = "%s.manifest" % image_name
242 image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
243
244 extract_dir = d.getVar("TEST_EXTRACTED_DIR")
245
246 # Get machine
247 machine = d.getVar("MACHINE")
248
249 # Get rootfs
250 fstypes = d.getVar('IMAGE_FSTYPES').split()
251 if d.getVar("TEST_TARGET") == "qemu":
252 fstypes = [fs for fs in fstypes if fs in supported_fstypes]
253 if not fstypes:
254 bb.fatal('Unsupported image type built. Add a compatible image to '
255 'IMAGE_FSTYPES. Supported types: %s' %
256 ', '.join(supported_fstypes))
257 qfstype = fstypes[0]
258 qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
259 if qdeffstype:
260 qfstype = qdeffstype
261 rootfs = '%s.%s' % (image_name, qfstype)
262
263 # Get tmpdir (not really used, just for compatibility)
264 tmpdir = d.getVar("TMPDIR")
265
266 # Get deploy_dir_image (not really used, just for compatibility)
267 dir_image = d.getVar("DEPLOY_DIR_IMAGE")
268
269 # Get bootlog
270 bootlog = os.path.join(d.getVar("TEST_LOG_DIR"),
271 'qemu_boot_log.%s' % d.getVar('DATETIME'))
272
273 # Get display
274 display = d.getVar("BB_ORIGENV").getVar("DISPLAY")
275
276 # Get kernel
277 kernel_name = ('%s-%s.bin' % (d.getVar("KERNEL_IMAGETYPE"), machine))
278 kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), kernel_name)
279
280 # Get boottime
281 boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT"))
282
283 # Get use_kvm
284 kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
285
286 # Get OVMF
287 ovmf = d.getVar("QEMU_USE_OVMF")
288
289 slirp = False
290 if d.getVar("QEMU_USE_SLIRP"):
291 slirp = True
292
293 # TODO: We use the current implementation of qemu runner because of
294 # time constraints; qemu runner really needs a refactor too.
295 target_kwargs = { 'machine' : machine,
296 'rootfs' : rootfs,
297 'tmpdir' : tmpdir,
298 'dir_image' : dir_image,
299 'display' : display,
300 'kernel' : kernel,
301 'boottime' : boottime,
302 'bootlog' : bootlog,
303 'kvm' : kvm,
304 'slirp' : slirp,
305 'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
306 'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()),
307 'ovmf' : ovmf,
308 }
309
310 if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
311 target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
312
313 # TODO: Currently BBPATH is needed for custom loading of targets.
314 # It would be better to find these modules using introspection.
315 target_kwargs['target_modules_path'] = d.getVar('BBPATH')
316
317 # hardware controlled targets might need further access
318 target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
319 target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
320 target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
321 target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
322 target_kwargs['testimage_dump_target'] = d.getVar("testimage_dump_target") or ""
323
324 def export_ssh_agent(d):
325 import os
326
327 variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
328 for v in variables:
329 if v not in os.environ.keys():
330 val = d.getVar(v)
331 if val is not None:
332 os.environ[v] = val
333
334 export_ssh_agent(d)
335
336 # runtime tests use the network to download projects for building
337 export_proxies(d)
338
339 # we need the host dumper in test context
340 host_dumper = OERuntimeTestContextExecutor.getHostDumper(
341 d.getVar("testimage_dump_host"),
342 d.getVar("TESTIMAGE_DUMP_DIR"))
343
344 # the robot dance
345 target = OERuntimeTestContextExecutor.getTarget(
346 d.getVar("TEST_TARGET"), logger, d.getVar("TEST_TARGET_IP"),
347 d.getVar("TEST_SERVER_IP"), **target_kwargs)
348
349 # test context
350 tc = OERuntimeTestContext(td, logger, target, host_dumper,
351 image_packages, extract_dir)
352
353 # Load tests before starting the target
354 test_paths = get_runtime_paths(d)
355 test_modules = d.getVar('TEST_SUITES').split()
356 if not test_modules:
357 bb.fatal('Empty test suite, please verify TEST_SUITES variable')
358
359 tc.loadTests(test_paths, modules=test_modules)
360
361 suitecases = getSuiteCases(tc.suites)
362 if not suitecases:
363 bb.fatal('Empty test suite, please verify TEST_SUITES variable')
364 else:
365 bb.debug(2, 'test suites:\n\t%s' % '\n\t'.join([str(c) for c in suitecases]))
366
367 package_extraction(d, tc.suites)
368
369 results = None
370 complete = False
371 orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
372 try:
373 # We need to check if runqemu ends unexpectedly
374 # or if the worker send us a SIGTERM
375 tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS"))
376 import threading
377 try:
378 threading.Timer(int(d.getVar("TEST_OVERALL_TIMEOUT")), handle_test_timeout, (int(d.getVar("TEST_OVERALL_TIMEOUT")),)).start()
379 except ValueError:
380 pass
381 results = tc.runTests()
382 complete = True
383 except (KeyboardInterrupt, BlockingIOError) as err:
384 if isinstance(err, KeyboardInterrupt):
385 bb.error('testimage interrupted, shutting down...')
386 else:
387 bb.error('runqemu failed, shutting down...')
388 if results:
389 results.stop()
390 results = tc.results
391 finally:
392 signal.signal(signal.SIGTERM, orig_sigterm_handler)
393 tc.target.stop()
394
395 # Show results (if we have them)
396 if results:
397 configuration = get_testimage_configuration(d, 'runtime', machine)
398 results.logDetails(get_testimage_json_result_dir(d),
399 configuration,
400 get_testimage_result_id(configuration),
401 dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
402 results.logSummary(pn)
403 if not results or not complete:
404 bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
405 if not results.wasSuccessful():
406 bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
407
408def get_runtime_paths(d):
409 """
410 Returns a list of paths where runtime tests must reside.
411
412 Runtime tests are expected in <LAYER_DIR>/lib/oeqa/runtime/cases/
413 """
414 paths = []
415
416 for layer in d.getVar('BBLAYERS').split():
417 path = os.path.join(layer, 'lib/oeqa/runtime/cases')
418 if os.path.isdir(path):
419 paths.append(path)
420 return paths
421
422def create_index(arg):
423 import subprocess
424
425 index_cmd = arg
426 try:
427 bb.note("Executing '%s' ..." % index_cmd)
428 result = subprocess.check_output(index_cmd,
429 stderr=subprocess.STDOUT,
430 shell=True)
431 result = result.decode('utf-8')
432 except subprocess.CalledProcessError as e:
433 return("Index creation command '%s' failed with return code "
434 '%d:\n%s' % (e.cmd, e.returncode, e.output.decode("utf-8")))
435 if result:
436 bb.note(result)
437 return None
438
439def create_rpm_index(d):
440 import glob
441 # Index RPMs
442 rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c")
443 index_cmds = []
444 archs = (d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or '').replace('-', '_')
445
446 for arch in archs.split():
447 rpm_dir = os.path.join(d.getVar('DEPLOY_DIR_RPM'), arch)
448 idx_path = os.path.join(d.getVar('WORKDIR'), 'oe-testimage-repo', arch)
449
450 if not os.path.isdir(rpm_dir):
451 continue
452
453 lockfilename = os.path.join(d.getVar('DEPLOY_DIR_RPM'), 'rpm.lock')
454 lf = bb.utils.lockfile(lockfilename, False)
455 oe.path.copyhardlinktree(rpm_dir, idx_path)
456 # Full indexes overload a 256MB image so reduce the number of rpms
457 # in the feed by filtering to specific packages needed by the tests.
458 package_list = glob.glob(idx_path + "*/*.rpm")
459
460 for pkg in package_list:
461 if not os.path.basename(pkg).startswith(("rpm", "run-postinsts", "busybox", "bash", "update-alternatives", "libc6", "curl", "musl")):
462 bb.utils.remove(pkg)
463
464 bb.utils.unlockfile(lf)
465 cmd = '%s --update -q %s' % (rpm_createrepo, idx_path)
466
467 # Create repodata
468 result = create_index(cmd)
469 if result:
470 bb.fatal('%s' % ('\n'.join(result)))
471
472def package_extraction(d, test_suites):
473 from oeqa.utils.package_manager import find_packages_to_extract
474 from oeqa.utils.package_manager import extract_packages
475
476 bb.utils.remove(d.getVar("TEST_NEEDED_PACKAGES_DIR"), recurse=True)
477 packages = find_packages_to_extract(test_suites)
478 if packages:
479 bb.utils.mkdirhier(d.getVar("TEST_INSTALL_TMP_DIR"))
480 bb.utils.mkdirhier(d.getVar("TEST_PACKAGED_DIR"))
481 bb.utils.mkdirhier(d.getVar("TEST_EXTRACTED_DIR"))
482 extract_packages(d, packages)
483
484testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME"
485
486python () {
487 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
488 bb.build.addtask("testimage", "do_build", "do_image_complete", d)
489}
490
491inherit testsdk
diff --git a/meta/classes/testsdk.bbclass b/meta/classes/testsdk.bbclass
deleted file mode 100644
index 758a23ac55..0000000000
--- a/meta/classes/testsdk.bbclass
+++ /dev/null
@@ -1,50 +0,0 @@
1# Copyright (C) 2013 - 2016 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# testsdk.bbclass enables testing for SDK and Extensible SDK
6#
7# To run SDK tests, run the commands:
8# $ bitbake <image-name> -c populate_sdk
9# $ bitbake <image-name> -c testsdk
10#
11# To run eSDK tests, run the commands:
12# $ bitbake <image-name> -c populate_sdk_ext
13# $ bitbake <image-name> -c testsdkext
14#
15# where "<image-name>" is an image like core-image-sato.
16
17TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK"
18TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt"
19
20def import_and_run(name, d):
21 import importlib
22
23 class_name = d.getVar(name)
24 if class_name:
25 module, cls = class_name.rsplit('.', 1)
26 m = importlib.import_module(module)
27 c = getattr(m, cls)()
28 c.run(d)
29 else:
30 bb.warn('No tests were run because %s did not define a class' % name)
31
32import_and_run[vardepsexclude] = "DATETIME BB_ORIGENV"
33
34python do_testsdk() {
35 import_and_run('TESTSDK_CLASS_NAME', d)
36}
37addtask testsdk
38do_testsdk[nostamp] = "1"
39
40python do_testsdkext() {
41 import_and_run('TESTSDKEXT_CLASS_NAME', d)
42}
43addtask testsdkext
44do_testsdkext[nostamp] = "1"
45
46python () {
47 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
48 bb.build.addtask("testsdk", None, "do_populate_sdk", d)
49 bb.build.addtask("testsdkext", None, "do_populate_sdk_ext", d)
50}
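
The TESTSDK_CLASS_NAME indirection above lets a layer point the task at its own handler. A minimal sketch of such a handler, assuming only that import_and_run() instantiates the named class and calls run(d) as shown above (module path and class name here are hypothetical):

    # Hypothetical lib/oeqa/sdk/mytestsdk.py in a custom layer; selected with
    # TESTSDK_CLASS_NAME = "oeqa.sdk.mytestsdk.MyTestSDK" in the image recipe.
    class MyTestSDK:
        def run(self, d):
            # 'd' is the datastore passed through by import_and_run() above;
            # run() is the only method that mechanism requires.
            print("would run SDK tests for %s" % d.getVar("PN"))
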
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
deleted file mode 100644
index f46bacabd4..0000000000
--- a/meta/classes/texinfo.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
1# This class is inherited by recipes whose upstream packages invoke the
2# texinfo utilities at build-time. Native and cross recipes are made to use the
3# dummy scripts provided by texinfo-dummy-native, for improved performance.
4# Target architecture recipes use the genuine Texinfo utilities. By default,
5# they use the Texinfo utilities on the host system. If you want to use the
6# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
7# makeinfo from SANITY_REQUIRED_UTILITIES.
8
9TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
10TEXDEP_class-native = "texinfo-dummy-native"
11TEXDEP_class-cross = "texinfo-dummy-native"
12TEXDEP_class-crosssdk = "texinfo-dummy-native"
13TEXDEP_class-cross-canadian = "texinfo-dummy-native"
14DEPENDS_append = " ${TEXDEP}"
15
16# libtool-cross doesn't inherit cross
17TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
18
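
For reference, the DISTRO_FEATURES test in TEXDEP behaves like a simple conditional; a rough Python equivalent of the inline expression (illustrative only, not part of the class):

    def pick_texdep(distro_features):
        # Mirrors bb.utils.contains('DISTRO_FEATURES', 'api-documentation', ..., ..., d):
        # real Texinfo when documentation is enabled, the dummy scripts otherwise.
        if 'api-documentation' in distro_features.split():
            return 'texinfo-replacement-native'
        return 'texinfo-dummy-native'

    assert pick_texdep('acl api-documentation ipv6') == 'texinfo-replacement-native'
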
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index 9518ddf7a4..03c4f3a930 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -3,7 +3,7 @@
3# 3#
4# Copyright (C) 2013 Intel Corporation 4# Copyright (C) 2013 Intel Corporation
5# 5#
6# Released under the MIT license (see COPYING.MIT) 6# SPDX-License-Identifier: MIT
7# 7#
8# This bbclass is designed to extract data used by OE-Core during the build process, 8# This bbclass is designed to extract data used by OE-Core during the build process,
9# for recording in the Toaster system. 9# for recording in the Toaster system.
@@ -101,12 +101,12 @@ def _toaster_load_pkgdatafile(dirpath, filepath):
101 for line in fin: 101 for line in fin:
102 try: 102 try:
103 kn, kv = line.strip().split(": ", 1) 103 kn, kv = line.strip().split(": ", 1)
104 m = re.match(r"^PKG_([^A-Z:]*)", kn) 104 m = re.match(r"^PKG:([^A-Z:]*)", kn)
105 if m: 105 if m:
106 pkgdata['OPKGN'] = m.group(1) 106 pkgdata['OPKGN'] = m.group(1)
107 kn = "_".join([x for x in kn.split("_") if x.isupper()]) 107 kn = kn.split(":")[0]
108 pkgdata[kn] = kv.strip() 108 pkgdata[kn] = kv
109 if kn == 'FILES_INFO': 109 if kn.startswith('FILES_INFO'):
110 pkgdata[kn] = json.loads(kv) 110 pkgdata[kn] = json.loads(kv)
111 111
112 except ValueError: 112 except ValueError:
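
The hunk above tracks the new override syntax: pkgdata entries now read PKG:<name> rather than PKG_<name>. A small sketch of how such a line parses after the change (the sample input is made up):

    import re

    line = "PKG:libfoo: libfoo"          # hypothetical pkgdata entry
    kn, kv = line.strip().split(": ", 1)
    m = re.match(r"^PKG:([^A-Z:]*)", kn)
    if m:
        print(m.group(1))                # -> "libfoo"
    print(kn.split(":")[0], kv)          # -> "PKG libfoo"
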
diff --git a/meta/classes/toolchain-scripts-base.bbclass b/meta/classes/toolchain-scripts-base.bbclass
deleted file mode 100644
index 2489b9dbeb..0000000000
--- a/meta/classes/toolchain-scripts-base.bbclass
+++ /dev/null
@@ -1,11 +0,0 @@
1# This function creates a version information file
2toolchain_create_sdk_version () {
3 local versionfile=$1
4 rm -f $versionfile
5 touch $versionfile
6 echo 'Distro: ${DISTRO}' >> $versionfile
7 echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
8 echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
9 echo 'Timestamp: ${DATETIME}' >> $versionfile
10}
11toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
deleted file mode 100644
index db1d3215ef..0000000000
--- a/meta/classes/toolchain-scripts.bbclass
+++ /dev/null
@@ -1,203 +0,0 @@
1inherit toolchain-scripts-base siteinfo kernel-arch
2
3# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
4# doesn't always match our expectations... but we default to the stock value
5REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
6TARGET_CC_ARCH_append_libc-musl = " -mmusl"
7
8# default debug prefix map isn't valid in the SDK
9DEBUG_PREFIX_MAP = ""
10
11# This function creates an environment-setup-script for use in a deployable SDK
12toolchain_create_sdk_env_script () {
13 # Create environment setup script. Remember that $SDKTARGETSYSROOT should
14 # only be expanded on the target at runtime.
15 base_sbindir=${10:-${base_sbindir_nativesdk}}
16 base_bindir=${9:-${base_bindir_nativesdk}}
17 sbindir=${8:-${sbindir_nativesdk}}
18 sdkpathnative=${7:-${SDKPATHNATIVE}}
19 prefix=${6:-${prefix_nativesdk}}
20 bindir=${5:-${bindir_nativesdk}}
21 libdir=${4:-${libdir}}
22 sysroot=${3:-${SDKTARGETSYSROOT}}
23 multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
24 script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
25 rm -f $script
26 touch $script
27
28 echo '# Check for LD_LIBRARY_PATH being set, which can break SDK and generally is a bad practice' >> $script
29 echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
30 echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
31	echo '# Only disable this check if you are absolutely sure you know what you are doing!' >> $script
32 echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
33 echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
34 echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
35 echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
36 echo ' echo "For more references see:"' >> $script
37 echo ' echo " http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80"' >> $script
38 echo ' echo " http://xahlee.info/UnixResource_dir/_/ldpath.html"' >> $script
39 echo ' return 1' >> $script
40 echo 'fi' >> $script
41
42 echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
43 EXTRAPATH=""
44 for i in ${CANADIANEXTRAOS}; do
45 EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
46 done
47 echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
48 echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
49 echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
50 echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
51 echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
52 echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
53 echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
54 echo 'export OECORE_BASELIB="${baselib}"' >> $script
55 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
56 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
57
58 echo 'unset command_not_found_handle' >> $script
59
60 toolchain_shared_env_script
61}
62
63# This function creates an environment-setup-script in the TMPDIR which enables
64# an OE-core IDE to integrate with the build tree
65toolchain_create_tree_env_script () {
66 script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
67 rm -f $script
68 touch $script
69 echo 'orig=`pwd`; cd ${COREBASE}; . ./oe-init-build-env ${TOPDIR}; cd $orig' >> $script
70 echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${STAGING_BINDIR_TOOLCHAIN}:$PATH' >> $script
71 echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
72 echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
73 echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
74 echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
75 echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
76 echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
77 echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script
78
79 toolchain_shared_env_script
80}
81
82toolchain_shared_env_script () {
83 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
84 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
85 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
86 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
87 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
88 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
89 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
90 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
91 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
92 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
93 echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
94 echo 'export AR=${TARGET_PREFIX}ar' >> $script
95 echo 'export NM=${TARGET_PREFIX}nm' >> $script
96 echo 'export M4=m4' >> $script
97 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
98 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script
99 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
100 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
101 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
102 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
103 echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script
104 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
105 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
106 echo 'export ARCH=${ARCH}' >> $script
107 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
108
109 cat >> $script <<EOF
110
111# Append environment subscripts
112if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
113 for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
114 . \$envfile
115 done
116fi
117if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
118 for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
119 . \$envfile
120 done
121fi
122EOF
123}
124
125toolchain_create_post_relocate_script() {
126 relocate_script=$1
127 env_dir=$2
128 rm -f $relocate_script
129 touch $relocate_script
130
131 cat >> $relocate_script <<EOF
132if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then
133 # Source top-level SDK env scripts in case they are needed for the relocate
134 # scripts.
135 for env_setup_script in ${env_dir}/environment-setup-*; do
136 . \$env_setup_script
137 status=\$?
138 if [ \$status != 0 ]; then
139 echo "\$0: Failed to source \$env_setup_script with status \$status"
140 exit \$status
141 fi
142
143 for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*; do
144 if [ ! -x \$s ]; then
145 continue
146 fi
147 \$s "\$1"
148 status=\$?
149 if [ \$status != 0 ]; then
150 echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
151 exit \$status
152 fi
153 done
154 done
155 rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d"
156fi
157EOF
158}
159
160# We get the cached site config at runtime
161TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
162TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
163TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
164DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
165
166# This function creates a site config file
167toolchain_create_sdk_siteconfig () {
168 local siteconfig=$1
169
170 rm -f $siteconfig
171 touch $siteconfig
172
173 for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
174 cat $sitefile >> $siteconfig
175 done
176
177 #get cached site config
178 for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
179 # Resolve virtual/* names to the real recipe name using sysroot-providers info
180 case $sitefile in virtual/*)
181 sitefile=`echo $sitefile | tr / _`
182 sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile`
183 esac
184
185 if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
186 cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
187 fi
188 done
189}
190# The immediate expansion above can result in unwanted path dependencies here
191toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
192
193python __anonymous () {
194 import oe.classextend
195 deps = ""
196 for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE') or "").split():
197 deps += " %s:do_populate_sysroot" % dep
198 for variant in (d.getVar('MULTILIB_VARIANTS') or "").split():
199 clsextend = oe.classextend.ClassExtender(variant, d)
200 newdep = clsextend.extend_name(dep)
201 deps += " %s:do_populate_sysroot" % newdep
202 d.appendVarFlag('do_configure', 'depends', deps)
203}
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
index 72da932232..160f7a024b 100644
--- a/meta/classes/typecheck.bbclass
+++ b/meta/classes/typecheck.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# Check types of bitbake configuration variables 7# Check types of bitbake configuration variables
2# 8#
3# See oe.types for details. 9# See oe.types for details.
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
deleted file mode 100644
index 89ff970fcc..0000000000
--- a/meta/classes/uboot-config.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
1# Handle U-Boot config for a machine
2#
3# The format to specify it, in the machine, is:
4#
5# UBOOT_CONFIG ??= <default>
6# UBOOT_CONFIG[foo] = "config,images,binary"
7#
8# or
9#
10# UBOOT_MACHINE = "config"
11#
12# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
13
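
As an aside, each selected UBOOT_CONFIG name maps, via a varflag, to a comma-separated "config,images,binary" triple that feeds UBOOT_MACHINE, IMAGE_FSTYPES and UBOOT_BINARIES. A rough sketch with made-up values:

    # Hypothetical machine settings:
    #   UBOOT_CONFIG = "sd"
    #   UBOOT_CONFIG[sd] = "mx6q_defconfig,sdcard,u-boot.img"
    items = "mx6q_defconfig,sdcard,u-boot.img".split(',')
    uboot_machine, image_fstypes, uboot_binary = items
    # The anonymous python below appends these to UBOOT_MACHINE,
    # IMAGE_FSTYPES and UBOOT_BINARIES respectively.
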
14UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
15
16python () {
17 ubootmachine = d.getVar("UBOOT_MACHINE")
18 ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
19 ubootbinary = d.getVar('UBOOT_BINARY')
20 ubootbinaries = d.getVar('UBOOT_BINARIES')
21 # The "doc" varflag is special, we don't want to see it here
22 ubootconfigflags.pop('doc', None)
23 ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
24
25 if not ubootmachine and not ubootconfig:
26 PN = d.getVar("PN")
27 FILE = os.path.basename(d.getVar("FILE"))
28 bb.debug(1, "To build %s, see %s for instructions on \
29 setting up your machine config" % (PN, FILE))
30 raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
31
32 if ubootmachine and ubootconfig:
33 raise bb.parse.SkipRecipe("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
34
35 if ubootconfigflags and ubootbinaries:
36 raise bb.parse.SkipRecipe("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
37
38 if len(ubootconfig) > 0:
39 for config in ubootconfig:
40 for f, v in ubootconfigflags.items():
41 if config == f:
42 items = v.split(',')
43 if items[0] and len(items) > 3:
44 raise bb.parse.SkipRecipe('Only config,images,binary can be specified!')
45 d.appendVar('UBOOT_MACHINE', ' ' + items[0])
46 # IMAGE_FSTYPES appending
47 if len(items) > 1 and items[1]:
48 bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
49 d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
50 if len(items) > 2 and items[2]:
51 bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % items[2])
52 d.appendVar('UBOOT_BINARIES', ' ' + items[2])
53 else:
54 bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary)
55 d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary)
56 break
57}
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
deleted file mode 100644
index be285daa01..0000000000
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ /dev/null
@@ -1,158 +0,0 @@
1# uboot-extlinux-config.bbclass
2#
3# This class allows generating an extlinux.conf for U-Boot use. The
4# U-Boot support for it exists so that OpenEmbedded-based products can
5# follow the Generic Distribution Configuration specification.
6#
7# External variables:
8#
9# UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot
10# default console.
11# UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config.
12# UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments.
13# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
14# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
15# UBOOT_EXTLINUX_FDT - Device tree file.
16# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
17# concatenate and use as an initrd (optional).
18# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
19# UBOOT_EXTLINUX_ROOT - Root kernel cmdline.
20# UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made.
21# Measured in 1/10 of a second.
22# UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after
23# the timeout period
24#
25# If there is only one label, the system will boot automatically and no menu
26# will be created. If you want to use more than one label, e.g. linux and
27# alternate, use overrides to set the menu description, console and other variables.
28#
29# Ex:
30#
31# UBOOT_EXTLINUX_LABELS ??= "default fallback"
32#
33# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
34# UBOOT_EXTLINUX_TIMEOUT ??= "30"
35#
36# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
37# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
38#
39# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
40# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
41#
42# Results:
43#
44# menu title Select the boot mode
45# TIMEOUT 30
46# DEFAULT Linux Default
47# LABEL Linux Default
48# KERNEL ../zImage
49# FDTDIR ../
50# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
51# LABEL Linux Fallback
52# KERNEL ../zImage-fallback
53# FDTDIR ../
54# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
55#
56# Copyright (C) 2016, O.S. Systems Software LTDA. All Rights Reserved
57# Released under the MIT license (see packages/COPYING)
58#
59# The kernel has an internal default console, which you can override with
60# a console=...some_tty...
61UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}"
62UBOOT_EXTLINUX_LABELS ??= "linux"
63UBOOT_EXTLINUX_FDT ??= ""
64UBOOT_EXTLINUX_FDTDIR ??= "../"
65UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
66UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
67UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
68
69UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
70
71python do_create_extlinux_config() {
72 if d.getVar("UBOOT_EXTLINUX") != "1":
73 return
74
75 if not d.getVar('WORKDIR'):
76 bb.error("WORKDIR not defined, unable to package")
77
78 labels = d.getVar('UBOOT_EXTLINUX_LABELS')
79 if not labels:
80 bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do")
81
82 if not labels.strip():
83 bb.fatal("No labels, nothing to do")
84
85 cfile = d.getVar('UBOOT_EXTLINUX_CONFIG')
86 if not cfile:
87 bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG')
88
89 localdata = bb.data.createCopy(d)
90
91 try:
92 with open(cfile, 'w') as cfgfile:
93 cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
94
95 if len(labels.split()) > 1:
96 cfgfile.write('menu title Select the boot mode\n')
97
98 timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
99 if timeout:
100 cfgfile.write('TIMEOUT %s\n' % (timeout))
101
102 if len(labels.split()) > 1:
103 default = localdata.getVar('UBOOT_EXTLINUX_DEFAULT_LABEL')
104 if default:
105 cfgfile.write('DEFAULT %s\n' % (default))
106
107 # Need to deconflict the labels with existing overrides
108 label_overrides = labels.split()
109 default_overrides = localdata.getVar('OVERRIDES').split(':')
110 # We're keeping all the existing overrides that aren't used as a label
111 # an override for that label will be added back in while we're processing that label
112 keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides))
113
114 for label in labels.split():
115
116 localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label]))
117
118 extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
119
120 menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION')
121 if not menu_description:
122 menu_description = label
123
124 root = localdata.getVar('UBOOT_EXTLINUX_ROOT')
125 if not root:
126 bb.fatal('UBOOT_EXTLINUX_ROOT not defined')
127
128 kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE')
129 fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR')
130
131 fdt = localdata.getVar('UBOOT_EXTLINUX_FDT')
132
133 if fdt:
134 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n' %
135 (menu_description, kernel_image, fdt))
136 elif fdtdir:
137 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' %
138 (menu_description, kernel_image, fdtdir))
139 else:
140 cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
141
142 kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS')
143
144 initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD')
145 if initrd:
146 cfgfile.write('\tINITRD %s\n'% initrd)
147
148 kernel_args = root + " " + kernel_args
149 cfgfile.write('\tAPPEND %s %s\n' % (kernel_args, extlinux_console))
150
151 except OSError:
152 bb.fatal('Unable to open %s' % (cfile))
153}
154UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
155do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
156do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
157
158addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
deleted file mode 100644
index 713196df41..0000000000
--- a/meta/classes/uboot-sign.bbclass
+++ /dev/null
@@ -1,132 +0,0 @@
1# This file is part of U-Boot verified boot support and is intended to be
2# inherited from the u-boot recipe and from kernel-fitimage.bbclass.
3#
4# The signature procedure requires the user to generate an RSA key and
5# certificate in a directory and to define the following variable:
6#
7# UBOOT_SIGN_KEYDIR = "/keys/directory"
8# UBOOT_SIGN_KEYNAME = "dev" # keys name in keydir (eg. "dev.crt", "dev.key")
9# UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
10# UBOOT_SIGN_ENABLE = "1"
11#
12# As verified boot depends on fitImage generation, the following is also required:
13#
14# KERNEL_CLASSES ?= " kernel-fitimage "
15# KERNEL_IMAGETYPE ?= "fitImage"
16#
17# The signature support is limited to the use of CONFIG_OF_SEPARATE in U-Boot.
18#
19# The task sequence is set as below, using DEPLOY_IMAGE_DIR as a common place to
20# handle the device tree blob:
21#
22# * u-boot:do_install_append
23# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
24# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
25#
26# * virtual/kernel:do_assemble_fitimage
27# Sign the image
28#
29# * u-boot:do_deploy[postfuncs]
30# Deploy files like UBOOT_DTB_IMAGE, UBOOT_DTB_SYMLINK and others.
31#
32# For more details on signature process, please refer to U-Boot documentation.
33
34# Signature activation.
35UBOOT_SIGN_ENABLE ?= "0"
36
37# Default value for deployment filenames.
38UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
39UBOOT_DTB_BINARY ?= "u-boot.dtb"
40UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
41UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
42UBOOT_NODTB_BINARY ?= "u-boot-nodtb.${UBOOT_SUFFIX}"
43UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.${UBOOT_SUFFIX}"
44
45# Functions in this bbclass are for u-boot only
46UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
47
48concat_dtb_helper() {
49 if [ -e "${UBOOT_DTB_BINARY}" ]; then
50 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
51 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
52 fi
53
54 if [ -f "${UBOOT_NODTB_BINARY}" ]; then
55 install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
56 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
57 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
58 fi
59
60 # Concatenate U-Boot w/o DTB & DTB with public key
61 # (cf. kernel-fitimage.bbclass for more details)
62 deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
63 if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
64 [ -e "$deployed_uboot_dtb_binary" ]; then
65 oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
66 install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
67 elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
68 cd ${DEPLOYDIR}
69 cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
70 else
71 bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
72 fi
73}
74
75concat_dtb() {
76 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
77 mkdir -p ${DEPLOYDIR}
78 if [ -n "${UBOOT_CONFIG}" ]; then
79 for config in ${UBOOT_MACHINE}; do
80 CONFIG_B_PATH="${config}"
81 cd ${B}/${config}
82 concat_dtb_helper
83 done
84 else
85 CONFIG_B_PATH=""
86 cd ${B}
87 concat_dtb_helper
88 fi
89 fi
90}
91
92# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
93# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
94install_helper() {
95 if [ -f "${UBOOT_DTB_BINARY}" ]; then
96 install -d ${D}${datadir}
97 # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
98 # need both of them.
99 install ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
100 ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
101 else
102 bbwarn "${UBOOT_DTB_BINARY} not found"
103 fi
104}
105
106do_install_append() {
107 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
108 if [ -n "${UBOOT_CONFIG}" ]; then
109 for config in ${UBOOT_MACHINE}; do
110 cd ${B}/${config}
111 install_helper
112 done
113 else
114 cd ${B}
115 install_helper
116 fi
117 fi
118}
119
120do_deploy_prepend_pn-${UBOOT_PN}() {
121 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ]; then
122 concat_dtb
123 fi
124}
125
126python () {
127 if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN') and d.getVar('UBOOT_DTB_BINARY'):
128 kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
129
130        # Make "bitbake u-boot -cdeploy" deploy the signed u-boot.dtb
131 d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn)
132}
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
deleted file mode 100644
index 1e19917a97..0000000000
--- a/meta/classes/uninative.bbclass
+++ /dev/null
@@ -1,171 +0,0 @@
1UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}"
2UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
3
4UNINATIVE_URL ?= "unset"
5UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz"
6# Example checksums
7#UNINATIVE_CHECKSUM[aarch64] = "dead"
8#UNINATIVE_CHECKSUM[i686] = "dead"
9#UNINATIVE_CHECKSUM[x86_64] = "dead"
10UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
11
12# Enabling uninative will change the following variables, so they need to go on the parsing white list to prevent multiple recipe parsing
13BB_HASHCONFIG_WHITELIST += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
14
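
The UNINATIVE_LOADER expression above is effectively a per-BUILD_ARCH lookup of the glibc dynamic loader name; roughly equivalent to the following sketch (illustrative only):

    LOADERS = {
        'x86_64':  'ld-linux-x86-64.so.2',
        'i686':    'ld-linux.so.2',
        'aarch64': 'ld-linux-aarch64.so.1',
        'ppc64le': 'ld64.so.2',
    }

    def uninative_loader(staging_dir, build_arch):
        # Mirrors the chained bb.utils.contains() calls in UNINATIVE_LOADER.
        return "%s-uninative/%s-linux/lib/%s" % (staging_dir, build_arch,
                                                 LOADERS.get(build_arch, ''))
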
15addhandler uninative_event_fetchloader
16uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
17
18addhandler uninative_event_enable
19uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
20
21python uninative_event_fetchloader() {
22 """
23 This event fires on the parent and will try to fetch the tarball if the
24 loader isn't already present.
25 """
26
27 chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
28 if not chksum:
29 bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))
30
31 loader = d.getVar("UNINATIVE_LOADER")
32 loaderchksum = loader + ".chksum"
33 if os.path.exists(loader) and os.path.exists(loaderchksum):
34 with open(loaderchksum, "r") as f:
35 readchksum = f.read().strip()
36 if readchksum == chksum:
37 return
38
39 import subprocess
40 try:
41 # Save and restore cwd as Fetch.download() does a chdir()
42 olddir = os.getcwd()
43
44 tarball = d.getVar("UNINATIVE_TARBALL")
45 tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
46 tarballpath = os.path.join(tarballdir, tarball)
47
48 if not os.path.exists(tarballpath + ".done"):
49 bb.utils.mkdirhier(tarballdir)
50 if d.getVar("UNINATIVE_URL") == "unset":
51 bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
52
53 localdata = bb.data.createCopy(d)
54 localdata.setVar('FILESPATH', "")
55 localdata.setVar('DL_DIR', tarballdir)
56 # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
57 # and we can't easily put 'chksum' into the url path from a url parameter with
58 # the current fetcher url handling
59 premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
60 for line in premirrors:
61 try:
62 (find, replace) = line
63 except ValueError:
64 continue
65 if find.startswith("http"):
66 localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
67
68 srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
69 bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
70
71 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
72 fetcher.download()
73 localpath = fetcher.localpath(srcuri)
74 if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
75 # Follow the symlink behavior from the bitbake fetch2.
76 # This will cover the case where an existing symlink is broken
77 # as well as if there are two processes trying to create it
78 # at the same time.
79 if os.path.islink(tarballpath):
80 # Broken symbolic link
81 os.unlink(tarballpath)
82
83 # Deal with two processes trying to make symlink at once
84 try:
85 os.symlink(localpath, tarballpath)
86 except FileExistsError:
87 pass
88
89        # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract the last field from the first line
90 glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
91 if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
92 raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
93
94 cmd = d.expand("\
95mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
96cd ${UNINATIVE_STAGING_DIR}-uninative; \
97tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
98${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
99 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
100 ${UNINATIVE_LOADER} \
101 ${UNINATIVE_LOADER} \
102 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
103 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
104 subprocess.check_output(cmd, shell=True)
105
106 with open(loaderchksum, "w") as f:
107 f.write(chksum)
108
109 enable_uninative(d)
110
111 except RuntimeError as e:
112 bb.warn(str(e))
113 except bb.fetch2.BBFetchException as exc:
114 bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
115 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
116 except subprocess.CalledProcessError as exc:
117 bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
118 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
119 finally:
120 os.chdir(olddir)
121}
122
123python uninative_event_enable() {
124 """
125 This event handler is called in the workers and is responsible for setting
126 up uninative if a loader is found.
127 """
128 enable_uninative(d)
129}
130
131def enable_uninative(d):
132 loader = d.getVar("UNINATIVE_LOADER")
133 if os.path.exists(loader):
134 bb.debug(2, "Enabling uninative")
135 d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
136 d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
137 d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
138 d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
139 d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
140 d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
141 d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
142
143python uninative_changeinterp () {
144 import subprocess
145 import stat
146 import oe.qa
147
148 if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
149 return
150
151 sstateinst = d.getVar('SSTATE_INSTDIR')
152 for walkroot, dirs, files in os.walk(sstateinst):
153 for file in files:
154 if file.endswith(".so") or ".so." in file:
155 continue
156 f = os.path.join(walkroot, file)
157 if os.path.islink(f):
158 continue
159 s = os.stat(f)
160 if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
161 continue
162 elf = oe.qa.ELFFile(f)
163 try:
164 elf.open()
165 except oe.qa.NotELFFileError:
166 continue
167 if not elf.isDynamic():
168 continue
169
170 subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
171}
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
deleted file mode 100644
index 8c2b66e7f1..0000000000
--- a/meta/classes/update-alternatives.bbclass
+++ /dev/null
@@ -1,327 +0,0 @@
1# This class is used to help the alternatives system which is useful when
2# multiple sources provide the same command. You can use the update-alternatives
3# command directly in your recipe, but in most cases this class simplifies
4# that job.
5#
6# To use this class a number of variables should be defined:
7#
8# List all of the alternatives needed by a package:
9# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
10#
11# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
12#
13# The pathname of the link
14# ALTERNATIVE_LINK_NAME[name] = "target"
15#
16# This is the name of the binary once it's been installed onto the runtime.
17# This name is global to all split packages in this recipe, and should match
18# other recipes with the same functionality.
19# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
20#
21# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
22#
23# The default link to create for all targets
24# ALTERNATIVE_TARGET = "target"
25#
26# This is useful in a multicall binary case
27# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
28#
29# A non-default link to create for a target
30# ALTERNATIVE_TARGET[name] = "target"
31#
32# This is the name of the binary as it has been installed by do_install
33# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
34#
35# A package specific link for a target
36# ALTERNATIVE_TARGET_<pkg>[name] = "target"
37#
38# This is useful when a recipe provides multiple alternatives for the
39# same item.
40#
41# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
42# from ALTERNATIVE_LINK_NAME.
43#
44# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
45# ALTERNATIVE_TARGET will have '.{BPN}' appended to it. If the file
46# referenced has not been renamed, it will also be renamed. (This avoids
47# the need to rename alternative files in the do_install step, but still
48# supports it if necessary for some reason.)
49#
50# The default priority for any alternatives
51# ALTERNATIVE_PRIORITY = "priority"
52#
53# i.e. default is ALTERNATIVE_PRIORITY = "10"
54#
55# The non-default priority for a specific target
56# ALTERNATIVE_PRIORITY[name] = "priority"
57#
58# The package priority for a specific target
59# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
60
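
The per-name and per-package variants above are consulted most-specific first. A sketch of the lookup order the class applies when resolving a target for one alternative (the helper name is hypothetical; the same chain appears in the code further down):

    def resolve_alt_target(d, pkg, alt_name, alt_link):
        # Most specific wins: per-package flag, global flag, per-package default,
        # global default, and finally the link name itself.
        return (d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name)
                or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
                or d.getVar('ALTERNATIVE_TARGET_%s' % pkg)
                or d.getVar('ALTERNATIVE_TARGET')
                or alt_link)
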
61ALTERNATIVE_PRIORITY = "10"
62
63# We need special processing for vardeps because it cannot work on
64# modified flag values. So we aggregate the flags into a new variable
65# and include that variable in the set.
66UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
67
68PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
69
70def gen_updatealternativesvardeps(d):
71 pkgs = (d.getVar("PACKAGES") or "").split()
72 vars = (d.getVar("UPDALTVARS") or "").split()
73
74 # First compute them for non_pkg versions
75 for v in vars:
76 for flag in sorted((d.getVarFlags(v) or {}).keys()):
77 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
78 continue
79 d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
80
81 for p in pkgs:
82 for v in vars:
83 for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
84 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
85 continue
86 d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
87
88def ua_extend_depends(d):
89 if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
90 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
91
92def update_alternatives_enabled(d):
93 # Update Alternatives only works on target packages...
94 if bb.data.inherits_class('native', d) or \
95 bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
96 bb.data.inherits_class('cross-canadian', d):
97 return False
98
99 # Disable when targeting mingw32 (no target support)
100 if d.getVar("TARGET_OS") == "mingw32":
101 return False
102
103 return True
104
105python __anonymous() {
106 if not update_alternatives_enabled(d):
107 return
108
109 # compute special vardeps
110 gen_updatealternativesvardeps(d)
111
112 # extend the depends to include virtual/update-alternatives
113 ua_extend_depends(d)
114}
115
116def gen_updatealternativesvars(d):
117 ret = []
118 pkgs = (d.getVar("PACKAGES") or "").split()
119 vars = (d.getVar("UPDALTVARS") or "").split()
120
121 for v in vars:
122 ret.append(v + "_VARDEPS")
123
124 for p in pkgs:
125 for v in vars:
126 ret.append(v + "_" + p)
127 ret.append(v + "_VARDEPS_" + p)
128 return " ".join(ret)
129
130# Now the new stuff, we use a custom function to generate the right values
131populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
132
133# We need to do the rename after the image creation step, but before
134# the split and strip steps. PACKAGE_PREPROCESS_FUNCS is the right
135# place for that.
136PACKAGE_PREPROCESS_FUNCS += "apply_update_alternative_renames"
137python apply_update_alternative_renames () {
138 if not update_alternatives_enabled(d):
139 return
140
141 import re
142
143 def update_files(alt_target, alt_target_rename, pkg, d):
144 f = d.getVar('FILES_' + pkg)
145 if f:
146 f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
147 d.setVar('FILES_' + pkg, f)
148
149 # Check for deprecated usage...
150 pn = d.getVar('BPN')
151 if d.getVar('ALTERNATIVE_LINKS') != None:
152 bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
153
154 # Do actual update alternatives processing
155 pkgdest = d.getVar('PKGD')
156 for pkg in (d.getVar('PACKAGES') or "").split():
157 # If the src == dest, we know we need to rename the dest by appending ${BPN}
158 link_rename = []
159 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
160 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
161 if not alt_link:
162 alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
163 d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
164 if alt_link.startswith(os.path.join(d.getVar('sysconfdir'), 'init.d')):
165 # Managing init scripts does not work (bug #10433), foremost
166 # because of a race with update-rc.d
167 bb.fatal("Using update-alternatives for managing SysV init scripts is not supported")
168
169 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
170 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
171 # Sometimes alt_target is specified as relative to the link name.
172 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
173
174 # If the link and target are the same name, we need to rename the target.
175 if alt_link == alt_target:
176 src = '%s/%s' % (pkgdest, alt_target)
177 alt_target_rename = '%s.%s' % (alt_target, pn)
178 dest = '%s/%s' % (pkgdest, alt_target_rename)
179 if os.path.lexists(dest):
180 bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
181 elif os.path.lexists(src):
182 if os.path.islink(src):
183 # Delay rename of links
184 link_rename.append((alt_target, alt_target_rename))
185 else:
186 bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
187 os.rename(src, dest)
188 update_files(alt_target, alt_target_rename, pkg, d)
189 else:
190 bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
191 continue
192 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
193
194 # Process delayed link names
195 # Do these after other renames so we can correct broken links
196 for (alt_target, alt_target_rename) in link_rename:
197 src = '%s/%s' % (pkgdest, alt_target)
198 dest = '%s/%s' % (pkgdest, alt_target_rename)
199 link_target = oe.path.realpath(src, pkgdest, True)
200
201 if os.path.lexists(link_target):
202 # Ok, the link_target exists, we can rename
203 bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
204 os.rename(src, dest)
205 else:
206 # Try to resolve the broken link to link.${BPN}
207 link_maybe = '%s.%s' % (os.readlink(src), pn)
208 if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
209 # Ok, the renamed link target exists.. create a new link, and remove the original
210 bb.note('%s: Creating new link %s -> %s' % (pn, alt_target_rename, link_maybe))
211 os.symlink(link_maybe, dest)
212 os.unlink(src)
213 else:
214 bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
215 continue
216 update_files(alt_target, alt_target_rename, pkg, d)
217}
218
219def update_alternatives_alt_targets(d, pkg):
220 """
221 Returns the update-alternatives metadata for a package.
222
223 The returned format is a list of tuples where the tuple contains:
224 alt_name: The binary name
225 alt_link: The path for the binary (Shared by different packages)
226 alt_target: The path for the renamed binary (Unique per package)
227 alt_priority: The priority of the alt_target
228
229 All the alt_targets will be installed into the sysroot. The alt_link is
230 a symlink pointing to the alt_target with the highest priority.
231 """
232
233 pn = d.getVar('BPN')
234 pkgdest = d.getVar('PKGD')
235 updates = list()
236 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
237 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
238 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
239 d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
240 d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
241 d.getVar('ALTERNATIVE_TARGET') or \
242 alt_link
243 alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \
244 d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \
245 d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \
246 d.getVar('ALTERNATIVE_PRIORITY')
247
248 # This shouldn't trigger, as it should have been resolved earlier!
249 if alt_link == alt_target:
250 bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
251 alt_target = '%s.%s' % (alt_target, pn)
252
253 if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
254 bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
255 continue
256
257 alt_target = os.path.normpath(alt_target)
258 updates.append( (alt_name, alt_link, alt_target, alt_priority) )
259
260 return updates
261
262PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
263
264python populate_packages_updatealternatives () {
265 if not update_alternatives_enabled(d):
266 return
267
268 # Do actual update alternatives processing
269 for pkg in (d.getVar('PACKAGES') or "").split():
270 # Create post install/removal scripts
271 alt_setup_links = ""
272 alt_remove_links = ""
273 updates = update_alternatives_alt_targets(d, pkg)
274 for alt_name, alt_link, alt_target, alt_priority in updates:
275 alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
276 alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
277
278 if alt_setup_links:
279 # RDEPENDS setup
280 provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
281 if provider:
282 #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
283 d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
284
285 bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
286 bb.note('%s' % alt_setup_links)
287 postinst = d.getVar('pkg_postinst_%s' % pkg)
288 if postinst:
289 postinst = alt_setup_links + postinst
290 else:
291 postinst = '#!/bin/sh\n' + alt_setup_links
292 d.setVar('pkg_postinst_%s' % pkg, postinst)
293
294 bb.note('%s' % alt_remove_links)
295 prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
296 prerm += alt_remove_links
297 d.setVar('pkg_prerm_%s' % pkg, prerm)
298}
299
300python package_do_filedeps_append () {
301 if update_alternatives_enabled(d):
302 apply_update_alternative_provides(d)
303}
304
305def apply_update_alternative_provides(d):
306 pn = d.getVar('BPN')
307 pkgdest = d.getVar('PKGDEST')
308
309 for pkg in d.getVar('PACKAGES').split():
310 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
311 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
312 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
313 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
314
315 if alt_link == alt_target:
316 bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
317 alt_target = '%s.%s' % (alt_target, pn)
318
319 if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
320 continue
321
322 # Add file provide
323 trans_target = oe.package.file_translate(alt_target)
324 d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
325 if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
326 d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
327
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
deleted file mode 100644
index 1366fee653..0000000000
--- a/meta/classes/update-rc.d.bbclass
+++ /dev/null
@@ -1,123 +0,0 @@
1UPDATERCPN ?= "${PN}"
2
3DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
4
5UPDATERCD = "update-rc.d"
6UPDATERCD_class-cross = ""
7UPDATERCD_class-native = ""
8UPDATERCD_class-nativesdk = ""
9
10INITSCRIPT_PARAMS ?= "defaults"
11
12INIT_D_DIR = "${sysconfdir}/init.d"
13
14def use_updatercd(d):
15 # If the distro supports both sysvinit and systemd, and the current recipe
16 # supports systemd, only call update-rc.d on rootfs creation or if systemd
17 # is not running. That's because systemctl enable/disable will already call
18 # update-rc.d if it detects initscripts.
19 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and bb.data.inherits_class('systemd', d):
20 return '[ -n "$D" -o ! -d /run/systemd/system ]'
21 return 'true'
22
23PACKAGE_WRITE_DEPS += "update-rc.d-native"
24
25updatercd_postinst() {
26if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
27 if [ -n "$D" ]; then
28 OPT="-r $D"
29 else
30 OPT="-s"
31 fi
32 update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
33fi
34}
35
36updatercd_prerm() {
37if ${@use_updatercd(d)} && [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
38 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
39fi
40}
41
42updatercd_postrm() {
43if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
44 if [ -n "$D" ]; then
45 OPT="-f -r $D"
46 else
47 OPT="-f"
48 fi
49 update-rc.d $OPT ${INITSCRIPT_NAME} remove
50fi
51}
52
53
54def update_rc_after_parse(d):
55 if d.getVar('INITSCRIPT_PACKAGES', False) == None:
56 if d.getVar('INITSCRIPT_NAME', False) == None:
57 bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
58 if d.getVar('INITSCRIPT_PARAMS', False) == None:
59 bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
60
61python __anonymous() {
62 update_rc_after_parse(d)
63}
64
65PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
66PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
67
68populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
69populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
70
71python populate_packages_updatercd () {
72 def update_rcd_auto_depend(pkg):
73 import subprocess
74 import os
75 path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}")
76 if not os.path.exists(path):
77 return
78 statement = "grep -q -w '/etc/init.d/functions' %s" % path
79 if subprocess.call(statement, shell=True) == 0:
80 mlprefix = d.getVar('MLPREFIX') or ""
81 d.appendVar('RDEPENDS_' + pkg, ' %sinitd-functions' % (mlprefix))
82
83 def update_rcd_package(pkg):
84 bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
85
86 localdata = bb.data.createCopy(d)
87 overrides = localdata.getVar("OVERRIDES")
88 localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
89
90 update_rcd_auto_depend(pkg)
91
92 postinst = d.getVar('pkg_postinst_%s' % pkg)
93 if not postinst:
94 postinst = '#!/bin/sh\n'
95 postinst += localdata.getVar('updatercd_postinst')
96 d.setVar('pkg_postinst_%s' % pkg, postinst)
97
98 prerm = d.getVar('pkg_prerm_%s' % pkg)
99 if not prerm:
100 prerm = '#!/bin/sh\n'
101 prerm += localdata.getVar('updatercd_prerm')
102 d.setVar('pkg_prerm_%s' % pkg, prerm)
103
104 postrm = d.getVar('pkg_postrm_%s' % pkg)
105 if not postrm:
106 postrm = '#!/bin/sh\n'
107 postrm += localdata.getVar('updatercd_postrm')
108 d.setVar('pkg_postrm_%s' % pkg, postrm)
109
110 d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
111
112 # Check that this class isn't being inhibited (generally, by
113 # systemd.bbclass) before doing any work.
114 if not d.getVar("INHIBIT_UPDATERCD_BBCLASS"):
115 pkgs = d.getVar('INITSCRIPT_PACKAGES')
116 if pkgs == None:
117 pkgs = d.getVar('UPDATERCPN')
118 packages = (d.getVar('PACKAGES') or "").split()
119 if not pkgs in packages and packages != []:
120 pkgs = packages[0]
121 for pkg in pkgs.split():
122 update_rcd_package(pkg)
123}
diff --git a/meta/classes/upstream-version-is-even.bbclass b/meta/classes/upstream-version-is-even.bbclass
deleted file mode 100644
index 256c752423..0000000000
--- a/meta/classes/upstream-version-is-even.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
1# This class ensures that the upstream version check only
2# accepts even minor versions (e.g. 3.0.x, 3.2.x, 3.4.x, etc.)
3# This scheme is used by Gnome and a number of other projects
4# to signify stable releases vs development releases.
5UPSTREAM_CHECK_REGEX = "[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar"
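
A quick check of what the regex above accepts and rejects (tarball names are illustrative):

    import re

    regex = r"[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar"
    for name in ("foo-3.36.2.tar.xz", "foo-3.37.1.tar.xz"):
        m = re.search(regex, name)
        print(name, "->", m.group('pver') if m else "skipped (odd minor)")
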
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3a1b5f1320..1dbcba2bf1 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# In order to support a deterministic set of 'dynamic' users/groups, 7# In order to support a deterministic set of 'dynamic' users/groups,
2# we need a function to reformat the params based on a static file 8# we need a function to reformat the params based on a static file
3def update_useradd_static_config(d): 9def update_useradd_static_config(d):
@@ -41,7 +47,7 @@ def update_useradd_static_config(d):
41 def handle_missing_id(id, type, pkg, files, var, value): 47 def handle_missing_id(id, type, pkg, files, var, value):
42 # For backwards compatibility we accept "1" in addition to "error" 48 # For backwards compatibility we accept "1" in addition to "error"
43 error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC') 49 error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
44 msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id) 50 msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
45 if files: 51 if files:
46 msg += " Add %s to one of these files: %s" % (id, files) 52 msg += " Add %s to one of these files: %s" % (id, files)
47 else: 53 else:
@@ -77,7 +83,7 @@ def update_useradd_static_config(d):
77 try: 83 try:
78 uaargs = parser.parse_args(oe.useradd.split_args(param)) 84 uaargs = parser.parse_args(oe.useradd.split_args(param))
79 except Exception as e: 85 except Exception as e:
80 bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e)) 86 bb.fatal("%s: Unable to parse arguments for USERADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
81 87
82 # Read all passwd files specified in USERADD_UID_TABLES or files/passwd 88 # Read all passwd files specified in USERADD_UID_TABLES or files/passwd
83 # Use the standard passwd layout: 89 # Use the standard passwd layout:
@@ -140,13 +146,13 @@ def update_useradd_static_config(d):
140 uaargs.gid = uaargs.groupid 146 uaargs.gid = uaargs.groupid
141 uaargs.user_group = None 147 uaargs.user_group = None
142 if newgroup and is_pkg: 148 if newgroup and is_pkg:
143 groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg) 149 groupadd = d.getVar("GROUPADD_PARAM:%s" % pkg)
144 if groupadd: 150 if groupadd:
145 # Only add the group if not already specified 151 # Only add the group if not already specified
146 if not uaargs.groupname in groupadd: 152 if not uaargs.groupname in groupadd:
147 d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup)) 153 d.setVar("GROUPADD_PARAM:%s" % pkg, "%s; %s" % (groupadd, newgroup))
148 else: 154 else:
149 d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup) 155 d.setVar("GROUPADD_PARAM:%s" % pkg, newgroup)
150 156
151 uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment 157 uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
152 uaargs.home_dir = field[5] or uaargs.home_dir 158 uaargs.home_dir = field[5] or uaargs.home_dir
@@ -174,8 +180,6 @@ def update_useradd_static_config(d):
174 newparam += ['', ' --non-unique'][uaargs.non_unique] 180 newparam += ['', ' --non-unique'][uaargs.non_unique]
175 if uaargs.password != None: 181 if uaargs.password != None:
176 newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None] 182 newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
177 elif uaargs.clear_password:
178 newparam += ['', ' --clear-password %s' % uaargs.clear_password][uaargs.clear_password != None]
179 newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None] 183 newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
180 newparam += ['', ' --system'][uaargs.system] 184 newparam += ['', ' --system'][uaargs.system]
181 newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None] 185 newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
@@ -198,7 +202,7 @@ def update_useradd_static_config(d):
198 # If we're processing multiple lines, we could have left over values here... 202 # If we're processing multiple lines, we could have left over values here...
199 gaargs = parser.parse_args(oe.useradd.split_args(param)) 203 gaargs = parser.parse_args(oe.useradd.split_args(param))
200 except Exception as e: 204 except Exception as e:
201 bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e)) 205 bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
202 206
203 # Read all group files specified in USERADD_GID_TABLES or files/group 207 # Read all group files specified in USERADD_GID_TABLES or files/group
204 # Use the standard group layout: 208 # Use the standard group layout:
@@ -236,8 +240,6 @@ def update_useradd_static_config(d):
236 newparam += ['', ' --non-unique'][gaargs.non_unique] 240 newparam += ['', ' --non-unique'][gaargs.non_unique]
237 if gaargs.password != None: 241 if gaargs.password != None:
238 newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None] 242 newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
239 elif gaargs.clear_password:
240 newparam += ['', ' --clear-password %s' % gaargs.clear_password][gaargs.clear_password != None]
241 newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None] 243 newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
242 newparam += ['', ' --system'][gaargs.system] 244 newparam += ['', ' --system'][gaargs.system]
243 newparam += ' %s' % gaargs.GROUP 245 newparam += ' %s' % gaargs.GROUP
@@ -265,17 +267,17 @@ def update_useradd_static_config(d):
265 for pkg in useradd_packages.split(): 267 for pkg in useradd_packages.split():
266 # Groupmems doesn't have anything we might want to change, so simply validating 268 # Groupmems doesn't have anything we might want to change, so simply validating
267 # is a bit of a waste -- only process useradd/groupadd 269 # is a bit of a waste -- only process useradd/groupadd
268 useradd_param = d.getVar('USERADD_PARAM_%s' % pkg) 270 useradd_param = d.getVar('USERADD_PARAM:%s' % pkg)
269 if useradd_param: 271 if useradd_param:
270 #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param)) 272 #bb.warn("Before: 'USERADD_PARAM:%s' - '%s'" % (pkg, useradd_param))
271 d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param, True)) 273 d.setVar('USERADD_PARAM:%s' % pkg, rewrite_useradd(useradd_param, True))
272 #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg))) 274 #bb.warn("After: 'USERADD_PARAM:%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM:%s' % pkg)))
273 275
274 groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg) 276 groupadd_param = d.getVar('GROUPADD_PARAM:%s' % pkg)
275 if groupadd_param: 277 if groupadd_param:
276 #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param)) 278 #bb.warn("Before: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, groupadd_param))
277 d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param, True)) 279 d.setVar('GROUPADD_PARAM:%s' % pkg, rewrite_groupadd(groupadd_param, True))
278 #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg))) 280 #bb.warn("After: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM:%s' % pkg)))
279 281
280 # Load and process extra users and groups, rewriting only adduser/addgroup params 282 # Load and process extra users and groups, rewriting only adduser/addgroup params
281 pkg = d.getVar('PN') 283 pkg = d.getVar('PN')
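These hunks track the override-syntax migration from USERADD_PARAM_<pkg> to USERADD_PARAM:<pkg>. For context, static IDs are typically enabled from a distro or local configuration roughly as follows (the table file names are illustrative), with recipes providing per-package parameters in the new colon form:

    INHERIT += "useradd-staticids"
    USERADD_ERROR_DYNAMIC = "error"
    USERADD_UID_TABLES = "files/passwd"
    USERADD_GID_TABLES = "files/group"
    # and in a recipe, for example:
    # USERADD_PARAM:${PN} = "--system --uid 1200 --home /var/lib/myuser myuser"
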
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index e5f3ba24f9..16a65ac323 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -1,9 +1,15 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1inherit useradd_base 7inherit useradd_base
2 8
3# base-passwd-cross provides the default passwd and group files in the 9# base-passwd-cross provides the default passwd and group files in the
4# target sysroot, and shadow -native and -sysroot provide the utilities 10# target sysroot, and shadow -native and -sysroot provide the utilities
5# and support files needed to add and modify user and group accounts 11# and support files needed to add and modify user and group accounts
6DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow base-passwd" 12DEPENDS:append:class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
7PACKAGE_WRITE_DEPS += "shadow-native" 13PACKAGE_WRITE_DEPS += "shadow-native"
8 14
9# This preinstall function can be run in four different contexts: 15# This preinstall function can be run in four different contexts:
@@ -97,6 +103,18 @@ fi
97} 103}
98 104
99useradd_sysroot () { 105useradd_sysroot () {
106 user_group_groupmems_add_sysroot user
107}
108
109groupadd_sysroot () {
110 user_group_groupmems_add_sysroot group
111}
112
113groupmemsadd_sysroot () {
114 user_group_groupmems_add_sysroot groupmems
115}
116
117user_group_groupmems_add_sysroot () {
100 # Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running 118 # Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
101 # at this point so we're explicit about the environment so pseudo can load if 119 # at this point so we're explicit about the environment so pseudo can load if
102 # not already present. 120 # not already present.
@@ -125,9 +143,15 @@ useradd_sysroot () {
125 fi 143 fi
126 144
127 # Add groups and users defined for all recipe packages 145 # Add groups and users defined for all recipe packages
128 GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}" 146 if test "$1" = "group"; then
129 USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}" 147 GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
130 GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}" 148 elif test "$1" = "user"; then
149 USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
150 elif test "$1" = "groupmems"; then
151 GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
152 elif test "x$1" = "x"; then
153 bbwarn "missing type of passwd db action"
154 fi
131 155
132 # Tell the system to use the environment vars 156 # Tell the system to use the environment vars
133 UA_SYSROOT=1 157 UA_SYSROOT=1
@@ -142,38 +166,45 @@ useradd_sysroot () {
142EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO" 166EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO"
143 167
144python useradd_sysroot_sstate () { 168python useradd_sysroot_sstate () {
145 scriptfile = None 169 for type, sort_prefix in [("group", "01"), ("user", "02"), ("groupmems", "03")]:
146 task = d.getVar("BB_CURRENTTASK") 170 scriptfile = None
147 if task == "package_setscene": 171 task = d.getVar("BB_CURRENTTASK")
148 bb.build.exec_func("useradd_sysroot", d) 172 if task == "package_setscene":
149 elif task == "prepare_recipe_sysroot": 173 bb.build.exec_func(type + "add_sysroot", d)
150 # Used to update this recipe's own sysroot so the user/groups are available to do_install 174 elif task == "prepare_recipe_sysroot":
151 scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-${PN}") 175 # Used to update this recipe's own sysroot so the user/groups are available to do_install
152 bb.build.exec_func("useradd_sysroot", d) 176
153 elif task == "populate_sysroot": 177 # If do_populate_sysroot is triggered and we write the file here, there would be an overlapping
154 # Used when installed in dependent task sysroots 178 # files. See usergrouptests.UserGroupTests.test_add_task_between_p_sysroot_and_package
155 scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-${PN}") 179 scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}-recipedebug")
156 180
157 if scriptfile: 181 bb.build.exec_func(type + "add_sysroot", d)
158 bb.utils.mkdirhier(os.path.dirname(scriptfile)) 182 elif task == "populate_sysroot":
159 with open(scriptfile, 'w') as script: 183 # Used when installed in dependent task sysroots
160 script.write("#!/bin/sh\n") 184 scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}")
161 bb.data.emit_func("useradd_sysroot", script, d) 185
162 script.write("useradd_sysroot\n") 186 if scriptfile:
163 os.chmod(scriptfile, 0o755) 187 bb.utils.mkdirhier(os.path.dirname(scriptfile))
188 with open(scriptfile, 'w') as script:
189 script.write("#!/bin/sh -e\n")
190 bb.data.emit_func(type + "add_sysroot", script, d)
191 script.write(type + "add_sysroot\n")
192 os.chmod(scriptfile, 0o755)
164} 193}
165 194
166do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}" 195do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
167SYSROOTFUNC_class-target = "useradd_sysroot_sstate" 196SYSROOTFUNC:class-target = "useradd_sysroot_sstate"
168SYSROOTFUNC = "" 197SYSROOTFUNC = ""
169 198
170SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}" 199SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
171 200
172SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate" 201SSTATEPREINSTFUNCS:append:class-target = " useradd_sysroot_sstate"
173 202
203USERADD_DEPENDS ??= ""
204DEPENDS += "${USERADD_DEPENDS}"
174do_package_setscene[depends] += "${USERADDSETSCENEDEPS}" 205do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
175do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}" 206do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
176USERADDSETSCENEDEPS_class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene" 207USERADDSETSCENEDEPS:class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene ${@' '.join(['%s:do_populate_sysroot_setscene' % pkg for pkg in d.getVar("USERADD_DEPENDS").split()])}"
177USERADDSETSCENEDEPS = "" 208USERADDSETSCENEDEPS = ""
178 209
179# Recipe parse-time sanity checks 210# Recipe parse-time sanity checks
@@ -184,8 +215,8 @@ def update_useradd_after_parse(d):
184 bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False)) 215 bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
185 216
186 for pkg in useradd_packages.split(): 217 for pkg in useradd_packages.split():
187 d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM_%s GROUPADD_PARAM_%s GROUPMEMS_PARAM_%s" % (pkg, pkg, pkg)) 218 d.appendVarFlag("do_populate_sysroot", "vardeps", " USERADD_PARAM:%s GROUPADD_PARAM:%s GROUPMEMS_PARAM:%s" % (pkg, pkg, pkg))
188 if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg): 219 if not d.getVar('USERADD_PARAM:%s' % pkg) and not d.getVar('GROUPADD_PARAM:%s' % pkg) and not d.getVar('GROUPMEMS_PARAM:%s' % pkg):
189 bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg)) 220 bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
190 221
191python __anonymous() { 222python __anonymous() {
@@ -199,7 +230,7 @@ python __anonymous() {
199def get_all_cmd_params(d, cmd_type): 230def get_all_cmd_params(d, cmd_type):
200 import string 231 import string
201 232
202 param_type = cmd_type.upper() + "_PARAM_%s" 233 param_type = cmd_type.upper() + "_PARAM:%s"
203 params = [] 234 params = []
204 235
205 useradd_packages = d.getVar('USERADD_PACKAGES') or "" 236 useradd_packages = d.getVar('USERADD_PACKAGES') or ""
@@ -211,7 +242,7 @@ def get_all_cmd_params(d, cmd_type):
211 return "; ".join(params) 242 return "; ".join(params)
212 243
213# Adds the preinst script into generated packages 244# Adds the preinst script into generated packages
214fakeroot python populate_packages_prepend () { 245fakeroot python populate_packages:prepend () {
215 def update_useradd_package(pkg): 246 def update_useradd_package(pkg):
216 bb.debug(1, 'adding user/group calls to preinst for %s' % pkg) 247 bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
217 248
@@ -220,7 +251,7 @@ fakeroot python populate_packages_prepend () {
220 required to execute on the target. Not doing so may cause 251 required to execute on the target. Not doing so may cause
221 useradd preinst to be invoked twice, causing unwanted warnings. 252 useradd preinst to be invoked twice, causing unwanted warnings.
222 """ 253 """
223 preinst = d.getVar('pkg_preinst_%s' % pkg) or d.getVar('pkg_preinst') 254 preinst = d.getVar('pkg_preinst:%s' % pkg) or d.getVar('pkg_preinst')
224 if not preinst: 255 if not preinst:
225 preinst = '#!/bin/sh\n' 256 preinst = '#!/bin/sh\n'
226 preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n' 257 preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
@@ -230,15 +261,19 @@ fakeroot python populate_packages_prepend () {
230 preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd') 261 preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
231 preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems') 262 preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
232 preinst += d.getVar('useradd_preinst') 263 preinst += d.getVar('useradd_preinst')
233 d.setVar('pkg_preinst_%s' % pkg, preinst) 264 # Expand out the *_PARAM variables to the package specific versions
265 for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
266 val = d.getVar(rep + ":" + pkg) or ""
267 preinst = preinst.replace("${" + rep + "}", val)
268 d.setVar('pkg_preinst:%s' % pkg, preinst)
234 269
235 # RDEPENDS setup 270 # RDEPENDS setup
236 rdepends = d.getVar("RDEPENDS_%s" % pkg) or "" 271 rdepends = d.getVar("RDEPENDS:%s" % pkg) or ""
237 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd' 272 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
238 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow' 273 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
239 # base-files is where the default /etc/skel is packaged 274 # base-files is where the default /etc/skel is packaged
240 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files' 275 rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
241 d.setVar("RDEPENDS_%s" % pkg, rdepends) 276 d.setVar("RDEPENDS:%s" % pkg, rdepends)
242 277
243 # Add the user/group preinstall scripts and RDEPENDS requirements 278 # Add the user/group preinstall scripts and RDEPENDS requirements
244 # to packages specified by USERADD_PACKAGES 279 # to packages specified by USERADD_PACKAGES
@@ -252,4 +287,4 @@ fakeroot python populate_packages_prepend () {
252# Use the following to extend the useradd with custom functions 287# Use the following to extend the useradd with custom functions
253USERADDEXTENSION ?= "" 288USERADDEXTENSION ?= ""
254 289
255inherit ${USERADDEXTENSION} 290inherit_defer ${USERADDEXTENSION}
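A recipe using the class now reads roughly as follows (user, group and dependency names are hypothetical); the new USERADD_DEPENDS variable pulls in another recipe whose users or groups are referenced here, so they exist before they are needed:

    inherit useradd

    USERADD_PACKAGES = "${PN}"
    GROUPADD_PARAM:${PN} = "--system mygroup"
    USERADD_PARAM:${PN} = "--system -g mygroup -s /sbin/nologin -d /var/lib/myuser myuser"
    # users/groups created by another recipe can be made available first:
    USERADD_DEPENDS = "some-other-recipe"
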
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
index 7f5b9b7219..5e1c699118 100644
--- a/meta/classes/useradd_base.bbclass
+++ b/meta/classes/useradd_base.bbclass
@@ -1,3 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
1# This bbclass provides basic functionality for user/group settings. 7# This bbclass provides basic functionality for user/group settings.
2# This bbclass is intended to be inherited by useradd.bbclass and 8# This bbclass is intended to be inherited by useradd.bbclass and
3# extrausers.bbclass. 9# extrausers.bbclass.
@@ -154,7 +160,7 @@ perform_passwd_expire () {
154 local username=`echo "$opts" | awk '{ print $NF }'` 160 local username=`echo "$opts" | awk '{ print $NF }'`
155 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`" 161 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
156 if test "x$user_exists" != "x"; then 162 if test "x$user_exists" != "x"; then
157 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO sed -i \''s/^\('$username':[^:]*\):[^:]*:/\1:0:/'\' $rootdir/etc/shadow \" || true 163 eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO sed --follow-symlinks -i \''s/^\('$username':[^:]*\):[^:]*:/\1:0:/'\' $rootdir/etc/shadow \" || true
158 local passwd_lastchanged="`grep "^$username:" $rootdir/etc/shadow | cut -d: -f3`" 164 local passwd_lastchanged="`grep "^$username:" $rootdir/etc/shadow | cut -d: -f3`"
159 if test "x$passwd_lastchanged" != "x0"; then 165 if test "x$passwd_lastchanged" != "x0"; then
160 bbfatal "${PN}: passwd --expire operation did not succeed." 166 bbfatal "${PN}: passwd --expire operation did not succeed."
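The added --follow-symlinks switch matters when /etc/shadow inside the target root is a symbolic link; with GNU sed a plain in-place edit would replace the link with a regular file. A rough illustration:

    ln -s shadow.real etc/shadow
    sed -i 's/old/new/' etc/shadow                    # etc/shadow becomes a regular file
    sed --follow-symlinks -i 's/old/new/' etc/shadow  # shadow.real is edited, the link survives
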
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
deleted file mode 100644
index b1f27d3658..0000000000
--- a/meta/classes/utility-tasks.bbclass
+++ /dev/null
@@ -1,53 +0,0 @@
1addtask listtasks
2do_listtasks[nostamp] = "1"
3python do_listtasks() {
4 taskdescs = {}
5 maxlen = 0
6 for e in d.keys():
7 if d.getVarFlag(e, 'task'):
8 maxlen = max(maxlen, len(e))
9 if e.endswith('_setscene'):
10 desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
11 else:
12 desc = d.getVarFlag(e, 'doc') or ''
13 taskdescs[e] = desc
14
15 tasks = sorted(taskdescs.keys())
16 for taskname in tasks:
17 bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
18}
19
20CLEANFUNCS ?= ""
21
22T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
23addtask clean
24do_clean[nostamp] = "1"
25python do_clean() {
26 """clear the build and temp directories"""
27 dir = d.expand("${WORKDIR}")
28 bb.note("Removing " + dir)
29 oe.path.remove(dir)
30
31 dir = "%s.*" % d.getVar('STAMP')
32 bb.note("Removing " + dir)
33 oe.path.remove(dir)
34
35 for f in (d.getVar('CLEANFUNCS') or '').split():
36 bb.build.exec_func(f, d)
37}
38
39addtask checkuri
40do_checkuri[nostamp] = "1"
41python do_checkuri() {
42 src_uri = (d.getVar('SRC_URI') or "").split()
43 if len(src_uri) == 0:
44 return
45
46 try:
47 fetcher = bb.fetch2.Fetch(src_uri, d)
48 fetcher.checkstatus()
49 except bb.fetch2.BBFetchException as e:
50 bb.fatal(str(e))
51}
52
53
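For reference, the tasks defined by this class are invoked directly on a recipe (the recipe name here is hypothetical):

    bitbake -c listtasks myrecipe   # print every task with its description
    bitbake -c clean myrecipe       # remove ${WORKDIR} and the recipe's stamps
    bitbake -c checkuri myrecipe    # check that each SRC_URI entry is reachable
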
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
deleted file mode 100644
index 120bcc64a6..0000000000
--- a/meta/classes/utils.bbclass
+++ /dev/null
@@ -1,362 +0,0 @@
1
2oe_soinstall() {
3 # Purpose: Install shared library file and
4 # create the necessary links
5 # Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
6 libname=`basename $1`
7 case "$libname" in
8 *.so)
9		bbfatal "oe_soinstall: Shared library must have a versioned filename (e.g. libfoo.so.1.2.3)"
10 ;;
11 esac
12 install -m 755 $1 $2/$libname
13 sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
14 if [ -z $sonamelink ]; then
15 bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
16 fi
17 solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
18 ln -sf $libname $2/$sonamelink
19 ln -sf $libname $2/$solink
20}
21
22oe_libinstall() {
23 # Purpose: Install a library, in all its forms
24 # Example
25 #
26 # oe_libinstall libltdl ${STAGING_LIBDIR}/
27 # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
28 dir=""
29 libtool=""
30 silent=""
31 require_static=""
32 require_shared=""
33 staging_install=""
34 while [ "$#" -gt 0 ]; do
35 case "$1" in
36 -C)
37 shift
38 dir="$1"
39 ;;
40 -s)
41 silent=1
42 ;;
43 -a)
44 require_static=1
45 ;;
46 -so)
47 require_shared=1
48 ;;
49 -*)
50 bbfatal "oe_libinstall: unknown option: $1"
51 ;;
52 *)
53 break;
54 ;;
55 esac
56 shift
57 done
58
59 libname="$1"
60 shift
61 destpath="$1"
62 if [ -z "$destpath" ]; then
63 bbfatal "oe_libinstall: no destination path specified"
64 fi
65 if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
66 then
67 staging_install=1
68 fi
69
70 __runcmd () {
71 if [ -z "$silent" ]; then
72 echo >&2 "oe_libinstall: $*"
73 fi
74 $*
75 }
76
77 if [ -z "$dir" ]; then
78 dir=`pwd`
79 fi
80
81 dotlai=$libname.lai
82
83 # Sanity check that the libname.lai is unique
84 number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
85 if [ $number_of_files -gt 1 ]; then
86 bbfatal "oe_libinstall: $dotlai is not unique in $dir"
87 fi
88
89
90 dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
91 olddir=`pwd`
92 __runcmd cd $dir
93
94 lafile=$libname.la
95
96	# If such a file doesn't exist, try to cut the version suffix
97	if [ ! -f "$lafile" ]; then
98		libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
99		lafile1=$libname1.la
100 if [ -f "$lafile1" ]; then
101 libname=$libname1
102 lafile=$lafile1
103 fi
104 fi
105
106 if [ -f "$lafile" ]; then
107 # libtool archive
108 eval `cat $lafile|grep "^library_names="`
109 libtool=1
110 else
111 library_names="$libname.so* $libname.dll.a $libname.*.dylib"
112 fi
113
114 __runcmd install -d $destpath/
115 dota=$libname.a
116 if [ -f "$dota" -o -n "$require_static" ]; then
117 rm -f $destpath/$dota
118 __runcmd install -m 0644 $dota $destpath/
119 fi
120 if [ -f "$dotlai" -a -n "$libtool" ]; then
121 rm -f $destpath/$libname.la
122 __runcmd install -m 0644 $dotlai $destpath/$libname.la
123 fi
124
125 for name in $library_names; do
126 files=`eval echo $name`
127 for f in $files; do
128 if [ ! -e "$f" ]; then
129 if [ -n "$libtool" ]; then
130 bbfatal "oe_libinstall: $dir/$f not found."
131 fi
132 elif [ -L "$f" ]; then
133 __runcmd cp -P "$f" $destpath/
134 elif [ ! -L "$f" ]; then
135 libfile="$f"
136 rm -f $destpath/$libfile
137 __runcmd install -m 0755 $libfile $destpath/
138 fi
139 done
140 done
141
142 if [ -z "$libfile" ]; then
143 if [ -n "$require_shared" ]; then
144 bbfatal "oe_libinstall: unable to locate shared library"
145 fi
146 elif [ -z "$libtool" ]; then
147 # special case hack for non-libtool .so.#.#.# links
148 baselibfile=`basename "$libfile"`
149 if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
150 sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
151 solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
152 if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
153 __runcmd ln -sf $baselibfile $destpath/$sonamelink
154 fi
155 __runcmd ln -sf $baselibfile $destpath/$solink
156 fi
157 fi
158
159 __runcmd cd "$olddir"
160}
161
162oe_machinstall() {
163 # Purpose: Install machine dependent files, if available
164 # If not available, check if there is a default
165 # If no default, just touch the destination
166 # Example:
167 # $1 $2 $3 $4
168 # oe_machinstall -m 0644 fstab ${D}/etc/fstab
169 #
170 # TODO: Check argument number?
171 #
172 filename=`basename $3`
173 dirname=`dirname $3`
174
175 for o in `echo ${OVERRIDES} | tr ':' ' '`; do
176 if [ -e $dirname/$o/$filename ]; then
177 bbnote $dirname/$o/$filename present, installing to $4
178 install $1 $2 $dirname/$o/$filename $4
179 return
180 fi
181 done
182# bbnote overrides specific file NOT present, trying default=$3...
183 if [ -e $3 ]; then
184 bbnote $3 present, installing to $4
185 install $1 $2 $3 $4
186 else
187 bbnote $3 NOT present, touching empty $4
188 touch $4
189 fi
190}
191
192create_cmdline_wrapper () {
193 # Create a wrapper script where commandline options are needed
194 #
195 # These are useful to work around relocation issues, by passing extra options
196 # to a program
197 #
198 # Usage: create_cmdline_wrapper FILENAME <extra-options>
199
200 cmd=$1
201 shift
202
203 echo "Generating wrapper script for $cmd"
204
205 mv $cmd $cmd.real
206 cmdname=`basename $cmd`
207 dirname=`dirname $cmd`
208 cmdoptions=$@
209 if [ "${base_prefix}" != "" ]; then
210 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
211 cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
212 fi
213 cat <<END >$cmd
214#!/bin/bash
215realpath=\`readlink -fn \$0\`
216realdir=\`dirname \$realpath\`
217exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
218END
219 chmod +x $cmd
220}
221
222create_wrapper () {
223 # Create a wrapper script where extra environment variables are needed
224 #
225 # These are useful to work around relocation issues, by setting environment
226 # variables which point to paths in the filesystem.
227 #
228 # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
229
230 cmd=$1
231 shift
232
233 echo "Generating wrapper script for $cmd"
234
235 mv $cmd $cmd.real
236 cmdname=`basename $cmd`
237 dirname=`dirname $cmd`
238 exportstring=$@
239 if [ "${base_prefix}" != "" ]; then
240 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
241 exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
242 fi
243 cat <<END >$cmd
244#!/bin/bash
245realpath=\`readlink -fn \$0\`
246realdir=\`dirname \$realpath\`
247export $exportstring
248exec -a "\$0" \$realdir/$cmdname.real "\$@"
249END
250 chmod +x $cmd
251}
252
253# Copy files/directories from $1 to $2 but using hardlinks
254# (preserve symlinks)
255hardlinkdir () {
256 from=$1
257 to=$2
258 (cd $from; find . -print0 | cpio --null -pdlu $to)
259}
260
261
262def check_app_exists(app, d):
263 app = d.expand(app).split()[0].strip()
264 path = d.getVar('PATH')
265 return bool(bb.utils.which(path, app))
266
267def explode_deps(s):
268 return bb.utils.explode_deps(s)
269
270def base_set_filespath(path, d):
271 filespath = []
272 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
273 # Remove default flag which was used for checking
274 extrapaths = extrapaths.replace("__default:", "")
275 # Don't prepend empty strings to the path list
276 if extrapaths != "":
277 path = extrapaths.split(":") + path
278 # The ":" ensures we have an 'empty' override
279 overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
280 overrides.reverse()
281 for o in overrides:
282 for p in path:
283 if p != "":
284 filespath.append(os.path.join(p, o))
285 return ":".join(filespath)
286
287def extend_variants(d, var, extend, delim=':'):
288 """Return a string of all bb class extend variants for the given extend"""
289 variants = []
290 whole = d.getVar(var) or ""
291 for ext in whole.split():
292 eext = ext.split(delim)
293 if len(eext) > 1 and eext[0] == extend:
294 variants.append(eext[1])
295 return " ".join(variants)
296
297def multilib_pkg_extend(d, pkg):
298 variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
299 if not variants:
300 return pkg
301 pkgs = pkg
302 for v in variants:
303 pkgs = pkgs + " " + v + "-" + pkg
304 return pkgs
305
306def get_multilib_datastore(variant, d):
307 return oe.utils.get_multilib_datastore(variant, d)
308
309def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
310 """Return a string of all ${var} in all multilib tune configuration"""
311 values = []
312 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
313 for item in variants:
314 localdata = get_multilib_datastore(item, d)
315 # We need WORKDIR to be consistent with the original datastore
316 localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
317 value = localdata.getVar(var) or ""
318 if value != "":
319 if need_split:
320 for item in value.split(delim):
321 values.append(item)
322 else:
323 values.append(value)
324 if unique:
325 #we do this to keep order as much as possible
326 ret = []
327 for value in values:
328 if not value in ret:
329 ret.append(value)
330 else:
331 ret = values
332 return " ".join(ret)
333
334def all_multilib_tune_list(vars, d):
335 """
336 Return a list of ${VAR} for each variable VAR in vars from each
337 multilib tune configuration.
338 Is safe to be called from a multilib recipe/context as it can
339 figure out the original tune and remove the multilib overrides.
340 """
341 values = {}
342 for v in vars:
343 values[v] = []
344 values['ml'] = ['']
345
346 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
347 for item in variants:
348 localdata = get_multilib_datastore(item, d)
349 values[v].append(localdata.getVar(v))
350 values['ml'].append(item)
351 return values
352all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
353
354# If the user hasn't set up their name/email, set some defaults
355check_git_config() {
356 if ! git config user.email > /dev/null ; then
357 git config --local user.email "${PATCH_GIT_USER_EMAIL}"
358 fi
359 if ! git config user.name > /dev/null ; then
360 git config --local user.name "${PATCH_GIT_USER_NAME}"
361 fi
362}
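As a reminder of how these helpers are consumed, a do_install step wrapping a relocatable tool might look like this (the tool name, environment variable and paths are purely illustrative):

    do_install:append() {
        # point the binary at its data directory through the environment
        create_wrapper ${D}${bindir}/mytool MYTOOL_DATADIR=${datadir}/mytool
    }
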
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
deleted file mode 100644
index bcaf68c5a7..0000000000
--- a/meta/classes/vala.bbclass
+++ /dev/null
@@ -1,24 +0,0 @@
1# Everyone needs vala-native and targets need vala, too,
2# because that is where target builds look for .vapi files.
3#
4VALADEPENDS = ""
5VALADEPENDS_class-target = "vala"
6DEPENDS_append = " vala-native ${VALADEPENDS}"
7
8# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
9export STAGING_DATADIR
10# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
11export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
12
13# Package additional files
14FILES_${PN}-dev += "\
15 ${datadir}/vala/vapi/*.vapi \
16 ${datadir}/vala/vapi/*.deps \
17 ${datadir}/gir-1.0 \
18"
19
20# Remove vapigen.m4 that is bundled with tarballs
21# because it does not yet have our cross-compile fixes
22do_configure_prepend() {
23 rm -f ${S}/m4/vapigen.m4
24}
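Consumers of this class normally need nothing beyond the inherit; the class itself adds vala-native, exposes the staged .vapi files and packages them as shown above:

    inherit vala
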
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
deleted file mode 100644
index 188119f356..0000000000
--- a/meta/classes/waf.bbclass
+++ /dev/null
@@ -1,76 +0,0 @@
1# avoids build breaks when using no-static-libs.inc
2DISABLE_STATIC = ""
3
4# What Python interpreter to use. Defaults to Python 3 but can be
5# overridden if required.
6WAF_PYTHON ?= "python3"
7
8B = "${WORKDIR}/build"
9do_configure[cleandirs] += "${B}"
10
11EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
12
13EXTRA_OEWAF_BUILD ??= ""
14# In most cases, you want to pass the same arguments to `waf build` and `waf
15# install`, but you can override it if necessary
16EXTRA_OEWAF_INSTALL ??= "${EXTRA_OEWAF_BUILD}"
17
18def waflock_hash(d):
19 # Calculates the hash used for the waf lock file. This should include
20 # all of the user controllable inputs passed to waf configure. Note
21 # that the full paths for ${B} and ${S} are used; this is OK and desired
22 # because a change to either of these should create a unique lock file
23 # to prevent collisions.
24 import hashlib
25 h = hashlib.sha512()
26 def update(name):
27 val = d.getVar(name)
28 if val is not None:
29 h.update(val.encode('utf-8'))
30 update('S')
31 update('B')
32 update('prefix')
33 update('EXTRA_OECONF')
34 return h.hexdigest()
35
36# Use WAFLOCK to specify a separate lock file. The build is already
37# sufficiently isolated by setting the output directory, this ensures that
38# bitbake won't step on toes of any other configured context in the source
39# directory (e.g. if the source is coming from externalsrc and was previously
40# configured elsewhere).
41export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
42BB_HASHBASE_WHITELIST += "WAFLOCK"
43
44python waf_preconfigure() {
45 import subprocess
46 from distutils.version import StrictVersion
47 subsrcdir = d.getVar('S')
48 python = d.getVar('WAF_PYTHON')
49 wafbin = os.path.join(subsrcdir, 'waf')
50 try:
51 result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
52 version = result.decode('utf-8').split()[1]
53 if StrictVersion(version) >= StrictVersion("1.8.7"):
54 d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
55 except subprocess.CalledProcessError as e:
56 bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
57 except FileNotFoundError:
58 bb.fatal("waf does not exist in %s" % subsrcdir)
59}
60
61do_configure[prefuncs] += "waf_preconfigure"
62
63waf_do_configure() {
64 (cd ${S} && ${WAF_PYTHON} ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
65}
66
67do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
68waf_do_compile() {
69 (cd ${S} && ${WAF_PYTHON} ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)} ${EXTRA_OEWAF_BUILD})
70}
71
72waf_do_install() {
73 (cd ${S} && ${WAF_PYTHON} ./waf install --destdir=${D} ${EXTRA_OEWAF_INSTALL})
74}
75
76EXPORT_FUNCTIONS do_configure do_compile do_install
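A typical consumer of the class looks roughly like this (the configure and build options are hypothetical):

    inherit waf

    # arguments for `waf configure` come from EXTRA_OECONF,
    # extra build/install arguments from the EXTRA_OEWAF_* variables
    EXTRA_OECONF += "--disable-docs"
    EXTRA_OEWAF_BUILD = "--verbose"
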
diff --git a/meta/classes/xmlcatalog.bbclass b/meta/classes/xmlcatalog.bbclass
deleted file mode 100644
index ae4811fdeb..0000000000
--- a/meta/classes/xmlcatalog.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
1DEPENDS = "libxml2-native"
2
3# A whitespace-separated list of XML catalogs to be registered, for example
4# "${sysconfdir}/xml/docbook-xml.xml".
5XMLCATALOGS ?= ""
6
7SYSROOT_PREPROCESS_FUNCS_append = " xmlcatalog_sstate_postinst"
8
9xmlcatalog_complete() {
10 ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
11 if [ ! -f $ROOTCATALOG ]; then
12 mkdir --parents $(dirname $ROOTCATALOG)
13 xmlcatalog --noout --create $ROOTCATALOG
14 fi
15 for CATALOG in ${XMLCATALOGS}; do
16 xmlcatalog --noout --add nextCatalog unused file://$CATALOG $ROOTCATALOG
17 done
18}
19
20xmlcatalog_sstate_postinst() {
21 mkdir -p ${SYSROOT_DESTDIR}${bindir}
22 dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog
23 echo '#!/bin/sh' > $dest
24 echo '${xmlcatalog_complete}' >> $dest
25 chmod 0755 $dest
26}
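A recipe shipping an XML catalog uses the class as in the example from the comment above:

    inherit xmlcatalog
    XMLCATALOGS = "${sysconfdir}/xml/docbook-xml.xml"
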
diff --git a/meta/classes/yocto-check-layer.bbclass b/meta/classes/yocto-check-layer.bbclass
new file mode 100644
index 0000000000..404f5fd9f2
--- /dev/null
+++ b/meta/classes/yocto-check-layer.bbclass
@@ -0,0 +1,22 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class is used by the yocto-check-layer script for additional per-recipe tests
9# The first test ensures that the layer has no recipes skipping 'installed-vs-shipped' QA checks
10#
11
12WARN_QA:remove = "installed-vs-shipped"
13ERROR_QA:append = " installed-vs-shipped"
14
15python () {
16 packages = set((d.getVar('PACKAGES') or '').split())
17 for package in packages:
18 skip = set((d.getVar('INSANE_SKIP') or "").split() +
19 (d.getVar('INSANE_SKIP:' + package) or "").split())
20 if 'installed-vs-shipped' in skip:
21 oe.qa.handle_error("installed-vs-shipped", 'Package %s is skipping "installed-vs-shipped" QA test.' % package, d)
22}
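The situation this class flags corresponds to a recipe carrying something like the following (the package override shown is hypothetical), which the layer check then reports through oe.qa.handle_error:

    INSANE_SKIP:${PN} += "installed-vs-shipped"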