Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 71
-rw-r--r--  meta/classes/autotools-brokensep.bbclass | 11
-rw-r--r--  meta/classes/autotools.bbclass | 260
-rw-r--r--  meta/classes/baremetal-image.bbclass | 128
-rw-r--r--  meta/classes/base.bbclass | 789
-rw-r--r--  meta/classes/bash-completion.bbclass | 13
-rw-r--r--  meta/classes/bin_package.bbclass | 42
-rw-r--r--  meta/classes/binconfig-disabled.bbclass | 36
-rw-r--r--  meta/classes/binconfig.bbclass | 60
-rw-r--r--  meta/classes/buildstats.bbclass | 302
-rw-r--r--  meta/classes/cargo.bbclass | 97
-rw-r--r--  meta/classes/cargo_common.bbclass | 139
-rw-r--r--  meta/classes/cmake.bbclass | 223
-rw-r--r--  meta/classes/cml1.bbclass | 107
-rw-r--r--  meta/classes/compress_doc.bbclass | 269
-rw-r--r--  meta/classes/core-image.bbclass | 81
-rw-r--r--  meta/classes/cpan-base.bbclass | 33
-rw-r--r--  meta/classes/cpan.bbclass | 71
-rw-r--r--  meta/classes/cpan_build.bbclass | 47
-rw-r--r--  meta/classes/cross-canadian.bbclass | 200
-rw-r--r--  meta/classes/cross.bbclass | 103
-rw-r--r--  meta/classes/crosssdk.bbclass | 57
-rw-r--r--  meta/classes/debian.bbclass | 156
-rw-r--r--  meta/classes/deploy.bbclass | 18
-rw-r--r--  meta/classes/devicetree.bbclass | 154
-rw-r--r--  meta/classes/devshell.bbclass | 166
-rw-r--r--  meta/classes/devupstream.bbclass | 61
-rw-r--r--  meta/classes/distro_features_check.bbclass | 13
-rw-r--r--  meta/classes/distrooverrides.bbclass | 38
-rw-r--r--  meta/classes/dos2unix.bbclass | 20
-rw-r--r--  meta/classes/externalsrc.bbclass | 269
-rw-r--r--  meta/classes/features_check.bbclass | 57
-rw-r--r--  meta/classes/fontcache.bbclass | 63
-rw-r--r--  meta/classes/fs-uuid.bbclass | 30
-rw-r--r--  meta/classes/gconf.bbclass | 77
-rw-r--r--  meta/classes/gettext.bbclass | 28
-rw-r--r--  meta/classes/gi-docgen.bbclass | 30
-rw-r--r--  meta/classes/gio-module-cache.bbclass | 44
-rw-r--r--  meta/classes/glide.bbclass | 15
-rw-r--r--  meta/classes/gnomebase.bbclass | 37
-rw-r--r--  meta/classes/go-mod.bbclass | 26
-rw-r--r--  meta/classes/go-ptest.bbclass | 60
-rw-r--r--  meta/classes/go.bbclass | 170
-rw-r--r--  meta/classes/goarch.bbclass | 122
-rw-r--r--  meta/classes/gobject-introspection-data.bbclass | 18
-rw-r--r--  meta/classes/gobject-introspection.bbclass | 61
-rw-r--r--  meta/classes/grub-efi-cfg.bbclass | 122
-rw-r--r--  meta/classes/grub-efi.bbclass | 14
-rw-r--r--  meta/classes/gsettings.bbclass | 48
-rw-r--r--  meta/classes/gtk-doc.bbclass | 89
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 95
-rw-r--r--  meta/classes/gtk-immodules-cache.bbclass | 82
-rw-r--r--  meta/classes/image-artifact-names.bbclass | 28
-rw-r--r--  meta/classes/image-combined-dbg.bbclass | 15
-rw-r--r--  meta/classes/image-container.bbclass | 27
-rw-r--r--  meta/classes/image-live.bbclass | 265
-rw-r--r--  meta/classes/image-postinst-intercepts.bbclass | 29
-rw-r--r--  meta/classes/image.bbclass | 684
-rw-r--r--  meta/classes/image_types.bbclass | 355
-rw-r--r--  meta/classes/image_types_wic.bbclass | 190
-rw-r--r--  meta/classes/insane.bbclass | 1453
-rw-r--r--  meta/classes/kernel-arch.bbclass | 74
-rw-r--r--  meta/classes/kernel-artifact-names.bbclass | 37
-rw-r--r--  meta/classes/kernel-devicetree.bbclass | 119
-rw-r--r--  meta/classes/kernel-fitimage.bbclass | 803
-rw-r--r--  meta/classes/kernel-grub.bbclass | 111
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 197
-rw-r--r--  meta/classes/kernel-uboot.bbclass | 49
-rw-r--r--  meta/classes/kernel-uimage.bbclass | 41
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 732
-rw-r--r--  meta/classes/kernel.bbclass | 821
-rw-r--r--  meta/classes/kernelsrc.bbclass | 16
-rw-r--r--  meta/classes/lib_package.bbclass | 12
-rw-r--r--  meta/classes/libc-package.bbclass | 390
-rw-r--r--  meta/classes/license.bbclass | 426
-rw-r--r--  meta/classes/license_image.bbclass | 295
-rw-r--r--  meta/classes/linux-dummy.bbclass | 31
-rw-r--r--  meta/classes/linux-kernel-base.bbclass | 47
-rw-r--r--  meta/classes/linuxloader.bbclass | 82
-rw-r--r--  meta/classes/live-vm-common.bbclass | 100
-rw-r--r--  meta/classes/logging.bbclass | 107
-rw-r--r--  meta/classes/manpages.bbclass | 51
-rw-r--r--  meta/classes/meson-routines.bbclass | 57
-rw-r--r--  meta/classes/meson.bbclass | 179
-rw-r--r--  meta/classes/mime-xdg.bbclass | 78
-rw-r--r--  meta/classes/mime.bbclass | 76
-rw-r--r--  meta/classes/mirrors.bbclass | 95
-rw-r--r--  meta/classes/module-base.bbclass | 27
-rw-r--r--  meta/classes/module.bbclass | 80
-rw-r--r--  meta/classes/multilib_header.bbclass | 58
-rw-r--r--  meta/classes/multilib_script.bbclass | 40
-rw-r--r--  meta/classes/native.bbclass | 236
-rw-r--r--  meta/classes/nativesdk.bbclass | 124
-rw-r--r--  meta/classes/nopackages.bbclass | 19
-rw-r--r--  meta/classes/npm.bbclass | 340
-rw-r--r--  meta/classes/package.bbclass | 2558
-rw-r--r--  meta/classes/package_deb.bbclass | 329
-rw-r--r--  meta/classes/package_ipk.bbclass | 292
-rw-r--r--  meta/classes/package_pkgdata.bbclass | 173
-rw-r--r--  meta/classes/package_rpm.bbclass | 761
-rw-r--r--  meta/classes/package_tar.bbclass | 77
-rw-r--r--  meta/classes/packagedata.bbclass | 40
-rw-r--r--  meta/classes/packagegroup.bbclass | 67
-rw-r--r--  meta/classes/patch.bbclass | 171
-rw-r--r--  meta/classes/perl-version.bbclass | 72
-rw-r--r--  meta/classes/perlnative.bbclass | 9
-rw-r--r--  meta/classes/pixbufcache.bbclass | 69
-rw-r--r--  meta/classes/pkgconfig.bbclass | 8
-rw-r--r--  meta/classes/populate_sdk.bbclass | 13
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 384
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass | 842
-rw-r--r--  meta/classes/ptest-gnome.bbclass | 14
-rw-r--r--  meta/classes/ptest-perl.bbclass | 36
-rw-r--r--  meta/classes/ptest.bbclass | 142
-rw-r--r--  meta/classes/pypi.bbclass | 34
-rw-r--r--  meta/classes/python3-dir.bbclass | 11
-rw-r--r--  meta/classes/python3native.bbclass | 30
-rw-r--r--  meta/classes/python3targetconfig.bbclass | 35
-rw-r--r--  meta/classes/python_flit_core.bbclass | 14
-rw-r--r--  meta/classes/python_hatchling.bbclass | 9
-rw-r--r--  meta/classes/python_pep517.bbclass | 60
-rw-r--r--  meta/classes/python_poetry_core.bbclass | 9
-rw-r--r--  meta/classes/python_pyo3.bbclass | 36
-rw-r--r--  meta/classes/python_setuptools3_rust.bbclass | 17
-rw-r--r--  meta/classes/python_setuptools_build_meta.bbclass | 9
-rw-r--r--  meta/classes/qemu.bbclass | 77
-rw-r--r--  meta/classes/qemuboot.bbclass | 171
-rw-r--r--  meta/classes/rootfs-postcommands.bbclass | 440
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 41
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 44
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 45
-rw-r--r--  meta/classes/rootfsdebugfiles.bbclass | 47
-rw-r--r--  meta/classes/rust-bin.bbclass | 154
-rw-r--r--  meta/classes/rust-common.bbclass | 177
-rw-r--r--  meta/classes/rust-target-config.bbclass | 391
-rw-r--r--  meta/classes/rust.bbclass | 51
-rw-r--r--  meta/classes/sanity.bbclass | 1028
-rw-r--r--  meta/classes/scons.bbclass | 34
-rw-r--r--  meta/classes/setuptools3-base.bbclass | 37
-rw-r--r--  meta/classes/setuptools3.bbclass | 38
-rw-r--r--  meta/classes/setuptools3_legacy.bbclass | 84
-rw-r--r--  meta/classes/siteinfo.bbclass | 232
-rw-r--r--  meta/classes/sstate.bbclass | 1364
-rw-r--r--  meta/classes/staging.bbclass | 690
-rw-r--r--  meta/classes/syslinux.bbclass | 194
-rw-r--r--  meta/classes/systemd-boot-cfg.bbclass | 77
-rw-r--r--  meta/classes/systemd-boot.bbclass | 35
-rw-r--r--  meta/classes/systemd.bbclass | 239
-rw-r--r--  meta/classes/testimage.bbclass | 508
-rw-r--r--  meta/classes/testsdk.bbclass | 52
-rw-r--r--  meta/classes/texinfo.bbclass | 24
-rw-r--r--  meta/classes/toolchain-scripts-base.bbclass | 17
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 236
-rw-r--r--  meta/classes/uboot-config.bbclass | 133
-rw-r--r--  meta/classes/uboot-extlinux-config.bbclass | 158
-rw-r--r--  meta/classes/uboot-sign.bbclass | 505
-rw-r--r--  meta/classes/uninative.bbclass | 177
-rw-r--r--  meta/classes/update-alternatives.bbclass | 333
-rw-r--r--  meta/classes/update-rc.d.bbclass | 129
-rw-r--r--  meta/classes/upstream-version-is-even.bbclass | 11
-rw-r--r--  meta/classes/utility-tasks.bbclass | 60
-rw-r--r--  meta/classes/utils.bbclass | 369
-rw-r--r--  meta/classes/vala.bbclass | 30
-rw-r--r--  meta/classes/waf.bbclass | 81
-rw-r--r--  meta/classes/xmlcatalog.bbclass | 32
165 files changed, 0 insertions, 29184 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
deleted file mode 100644
index 9138f40ed8..0000000000
--- a/meta/classes/allarch.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-#
-# This class is used for architecture-independent recipes/data files (usually scripts)
-#
-
-python allarch_package_arch_handler () {
-    if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
-            or bb.data.inherits_class("crosssdk", d):
-        return
-
-    variants = d.getVar("MULTILIB_VARIANTS")
-    if not variants:
-        d.setVar("PACKAGE_ARCH", "all")
-}
-
-addhandler allarch_package_arch_handler
-allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
-
-python () {
-    # Allow this class to be included but overridden - only set
-    # the values if we're still "all" package arch.
-    if d.getVar("PACKAGE_ARCH") == "all":
-        # No need for virtual/libc or a cross compiler
-        d.setVar("INHIBIT_DEFAULT_DEPS", "1")
-
-        # Set these to a common set of values, we shouldn't be using them other than for WORKDIR directory
-        # naming anyway
-        d.setVar("baselib", "lib")
-        d.setVar("TARGET_ARCH", "allarch")
-        d.setVar("TARGET_OS", "linux")
-        d.setVar("TARGET_CC_ARCH", "none")
-        d.setVar("TARGET_LD_ARCH", "none")
-        d.setVar("TARGET_AS_ARCH", "none")
-        d.setVar("TARGET_FPU", "")
-        d.setVar("TARGET_PREFIX", "")
-        # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
-        # (this removes any dependencies from the hash perspective)
-        d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
-        d.setVar("SDK_ARCH", "none")
-        d.setVar("SDK_CC_ARCH", "none")
-        d.setVar("TARGET_CPPFLAGS", "none")
-        d.setVar("TARGET_CFLAGS", "none")
-        d.setVar("TARGET_CXXFLAGS", "none")
-        d.setVar("TARGET_LDFLAGS", "none")
-        d.setVar("POPULATESYSROOTDEPS", "")
-
-        # Avoid this being unnecessarily different due to nuances of
-        # the target machine that aren't important for "all" arch
-        # packages.
-        d.setVar("LDFLAGS", "")
-
-        # No need to do shared library processing or debug symbol handling
-        d.setVar("EXCLUDE_FROM_SHLIBS", "1")
-        d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
-        d.setVar("INHIBIT_PACKAGE_STRIP", "1")
-
-        # These multilib values shouldn't change allarch packages so exclude them
-        d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
-        d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
-        d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
-    elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
-        bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inheriting packagegroup" % d.getVar("FILE"))
-}
-
-def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
-    return 'false'
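
As a usage sketch (the recipe contents, file name and checksum below are hypothetical, though the MIT checksum matches the usual OE-core common-licenses value), a script-only recipe simply inherits the class and the event handler above switches PACKAGE_ARCH to "all":

    SUMMARY = "Example architecture-independent script package"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"

    SRC_URI = "file://example.sh"

    inherit allarch

    do_install() {
        install -d ${D}${bindir}
        install -m 0755 ${WORKDIR}/example.sh ${D}${bindir}/example.sh
    }
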
diff --git a/meta/classes/autotools-brokensep.bbclass b/meta/classes/autotools-brokensep.bbclass
deleted file mode 100644
index a0fb4b7b50..0000000000
--- a/meta/classes/autotools-brokensep.bbclass
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Autotools class for recipes where separate build dir doesn't work
8# Ideally we should fix software so it does work. Standard autotools supports
9# this.
10inherit autotools
11B = "${S}"
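
Usage is a one-line change in the recipe (hypothetical fragment): inheriting this variant instead of autotools forces an in-tree build, since B is pinned to ${S} above.

    # For software whose configure script breaks when B != S
    inherit autotools-brokensep
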
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
deleted file mode 100644
index a4c1c4be41..0000000000
--- a/meta/classes/autotools.bbclass
+++ /dev/null
@@ -1,260 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def get_autotools_dep(d):
8 if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
9 return ''
10
11 pn = d.getVar('PN')
12 deps = ''
13
14 if pn in ['autoconf-native', 'automake-native']:
15 return deps
16 deps += 'autoconf-native automake-native '
17
18 if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
19 deps += 'libtool-native '
20 if not bb.data.inherits_class('native', d) \
21 and not bb.data.inherits_class('nativesdk', d) \
22 and not bb.data.inherits_class('cross', d) \
23 and not d.getVar('INHIBIT_DEFAULT_DEPS'):
24 deps += 'libtool-cross '
25
26 return deps
27
28
29DEPENDS:prepend = "${@get_autotools_dep(d)} "
30
31inherit siteinfo
32
33# Space separated list of shell scripts with variables defined to supply test
34# results for autoconf tests we cannot run at build time.
35# The value of this variable is filled in in a prefunc because it depends on
36# the contents of the sysroot.
37export CONFIG_SITE
38
39acpaths ?= "default"
40EXTRA_AUTORECONF = "--exclude=autopoint --exclude=gtkdocize"
41
42export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
43
44# When building tools for use at build-time it's recommended for the build
45# system to use these variables when cross-compiling.
46# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
47export CPP_FOR_BUILD = "${BUILD_CPP}"
48export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
49
50export CC_FOR_BUILD = "${BUILD_CC}"
51export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
52
53export CXX_FOR_BUILD = "${BUILD_CXX}"
54export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
55
56export LD_FOR_BUILD = "${BUILD_LD}"
57export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
58
59def append_libtool_sysroot(d):
60 # Only supply libtool sysroot option for non-native packages
61 if not bb.data.inherits_class('native', d):
62 return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
63 return ""
64
65CONFIGUREOPTS = " --build=${BUILD_SYS} \
66 --host=${HOST_SYS} \
67 --target=${TARGET_SYS} \
68 --prefix=${prefix} \
69 --exec_prefix=${exec_prefix} \
70 --bindir=${bindir} \
71 --sbindir=${sbindir} \
72 --libexecdir=${libexecdir} \
73 --datadir=${datadir} \
74 --sysconfdir=${sysconfdir} \
75 --sharedstatedir=${sharedstatedir} \
76 --localstatedir=${localstatedir} \
77 --libdir=${libdir} \
78 --includedir=${includedir} \
79 --oldincludedir=${oldincludedir} \
80 --infodir=${infodir} \
81 --mandir=${mandir} \
82 --disable-silent-rules \
83 ${CONFIGUREOPT_DEPTRACK} \
84 ${@append_libtool_sysroot(d)}"
85CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
86
87CACHED_CONFIGUREVARS ?= ""
88
89AUTOTOOLS_SCRIPT_PATH ?= "${S}"
90CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
91
92AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
93
94oe_runconf () {
95 # Use relative path to avoid buildpaths in files
96 cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
97 cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
98 if [ -x "$cfgscript" ] ; then
99 bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
100 if ! CONFIG_SHELL=${CONFIG_SHELL-/bin/bash} ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
101 bbnote "The following config.log files may provide further information."
102 bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
103 bbfatal_log "configure failed"
104 fi
105 else
106 bbfatal "no configure script found at $cfgscript"
107 fi
108}
109
110CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
111
112autotools_preconfigure() {
113 if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
114 if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
115 if [ "${S}" != "${B}" ]; then
116 echo "Previously configured separate build directory detected, cleaning ${B}"
117 rm -rf ${B}
118 mkdir -p ${B}
119 else
120 # At least remove the .la files since automake won't automatically
121 # regenerate them even if CFLAGS/LDFLAGS are different
122 cd ${S}
123 if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
124 oe_runmake clean
125 fi
126 find ${S} -ignore_readdir_race -name \*.la -delete
127 fi
128 fi
129 fi
130}
131
132autotools_postconfigure(){
133 if [ -n "${CONFIGURESTAMPFILE}" ]; then
134 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
135 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
136 fi
137}
138
139EXTRACONFFUNCS ??= ""
140
141EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
142
143do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
144do_compile[prefuncs] += "autotools_aclocals"
145do_install[prefuncs] += "autotools_aclocals"
146do_configure[postfuncs] += "autotools_postconfigure"
147
148ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
149ACLOCALEXTRAPATH = ""
150ACLOCALEXTRAPATH:class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
151ACLOCALEXTRAPATH:class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
152
153python autotools_aclocals () {
154 sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
155 d.setVar("CONFIG_SITE", " ".join(sitefiles))
156}
157
158do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
159
160CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
161
162autotools_do_configure() {
163 # WARNING: gross hack follows:
164 # An autotools built package generally needs these scripts, however only
165 # automake or libtoolize actually install the current versions of them.
166 # This is a problem in builds that do not use libtool or automake, in the case
167 # where we -need- the latest version of these scripts. e.g. running a build
168 # for a package whose autotools are old, on an x86_64 machine, which the old
169 # config.sub does not support. Work around this by installing them manually
170 # regardless.
171
172 PRUNE_M4=""
173
174 for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
175 rm -f `dirname $ac`/configure
176 done
177 if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
178 olddir=`pwd`
179 cd ${AUTOTOOLS_SCRIPT_PATH}
180 mkdir -p ${ACLOCALDIR}
181 ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
182 if [ x"${acpaths}" = xdefault ]; then
183 acpaths=
184 for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
185 grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
186 acpaths="$acpaths -I $i"
187 done
188 else
189 acpaths="${acpaths}"
190 fi
191 acpaths="$acpaths ${ACLOCALEXTRAPATH}"
192 AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
193 automake --version
194 echo "AUTOV is $AUTOV"
195 if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
196 ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
197 fi
198 # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
199 # like it was auto-generated. Work around this by blowing it away
200 # by hand, unless the package specifically asked not to run aclocal.
201 if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
202 rm -f aclocal.m4
203 fi
204 if [ -e configure.in ]; then
205 CONFIGURE_AC=configure.in
206 else
207 CONFIGURE_AC=configure.ac
208 fi
209 if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
210 if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
211 : do nothing -- we still have an old unmodified configure.ac
212 else
213 bbnote Executing glib-gettextize --force --copy
214 echo "no" | glib-gettextize --force --copy
215 fi
216 elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
217 # We'd call gettextize here if it wasn't so broken...
218 cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
219 if [ -d ${S}/po/ ]; then
220 cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
221 if [ ! -e ${S}/po/remove-potcdate.sin ]; then
222 cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
223 fi
224 fi
225 PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
226 fi
227 mkdir -p m4
228
229 for i in $PRUNE_M4; do
230 find ${S} -ignore_readdir_race -name $i -delete
231 done
232
233 bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
234 ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
235 cd $olddir
236 fi
237 if [ -e ${CONFIGURE_SCRIPT} ]; then
238 oe_runconf
239 else
240 bbnote "nothing to configure"
241 fi
242}
243
244autotools_do_compile() {
245 oe_runmake
246}
247
248autotools_do_install() {
249 oe_runmake 'DESTDIR=${D}' install
250 # Info dir listing isn't interesting at this point so remove it if it exists.
251 if [ -e "${D}${infodir}/dir" ]; then
252 rm -f ${D}${infodir}/dir
253 fi
254}
255
256inherit siteconfig
257
258EXPORT_FUNCTIONS do_configure do_compile do_install
259
260B = "${WORKDIR}/build"
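
As a usage sketch (recipe name and options invented): a recipe inherits the class and passes extra arguments to oe_runconf via EXTRA_OECONF; note that PACKAGECONFIG_CONFARGS is folded into EXTRA_OECONF automatically by the append above.

    inherit autotools

    # Extra arguments appended after the standard CONFIGUREOPTS
    EXTRA_OECONF = "--disable-static --without-docs"

    # For sources whose configure.ac lives in a subdirectory
    AUTOTOOLS_SCRIPT_PATH = "${S}/src"
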
diff --git a/meta/classes/baremetal-image.bbclass b/meta/classes/baremetal-image.bbclass
deleted file mode 100644
index 3a979f2ed1..0000000000
--- a/meta/classes/baremetal-image.bbclass
+++ /dev/null
@@ -1,128 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Baremetal image class
8#
9# This class is meant to be inherited by recipes for baremetal/RTOS applications
10# It contains code that would be used by all of them, every recipe just needs to
11# override certain variables.
12#
13# For scalability purposes, code within this class focuses on the "image" wiring
14# to satisfy the OpenEmbedded image creation and testing infrastructure.
15#
16# See meta-skeleton for a working example.
17
18
19# Toolchain should be baremetal or newlib based.
20# TCLIBC="baremetal" or TCLIBC="newlib"
21COMPATIBLE_HOST:libc-musl:class-target = "null"
22COMPATIBLE_HOST:libc-glibc:class-target = "null"
23
24
25inherit rootfs-postcommands
26
27# Set some defaults, but these should be overriden by each recipe if required
28IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
29BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
30IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
31IMAGE_NAME_SUFFIX ?= ""
32
33do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
34
35do_image(){
36 install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin
37 install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
38}
39
40do_image_complete(){
41 :
42}
43
44python do_rootfs(){
45 from oe.utils import execute_pre_post_process
46 from pathlib import Path
47
48 # Write empty manifest file to satisfy test infrastructure
49 deploy_dir = d.getVar('IMGDEPLOYDIR')
50 link_name = d.getVar('IMAGE_LINK_NAME')
51 manifest_name = d.getVar('IMAGE_MANIFEST')
52
53 Path(manifest_name).touch()
54 if os.path.exists(manifest_name) and link_name:
55 manifest_link = deploy_dir + "/" + link_name + ".manifest"
56 if manifest_link != manifest_name:
57 if os.path.lexists(manifest_link):
58 os.remove(manifest_link)
59 os.symlink(os.path.basename(manifest_name), manifest_link)
60 # A lot of postprocess commands assume the existence of rootfs/etc
61 sysconfdir = d.getVar("IMAGE_ROOTFS") + d.getVar('sysconfdir')
62 bb.utils.mkdirhier(sysconfdir)
63
64 execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
65}
66
67
68# Assure binaries, manifest and qemubootconf are populated on DEPLOY_DIR_IMAGE
69do_image_complete[dirs] = "${TOPDIR}"
70SSTATETASKS += "do_image_complete"
71SSTATE_SKIP_CREATION:task-image-complete = '1'
72do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
73do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
74do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
75addtask do_image_complete after do_image before do_build
76
77python do_image_complete_setscene () {
78 sstate_setscene(d)
79}
80addtask do_image_complete_setscene
81
82# QEMU generic Baremetal/RTOS parameters
83QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
84QB_MEM ?= "-m 256"
85QB_DEFAULT_FSTYPE ?= "bin"
86QB_DTB ?= ""
87QB_OPT_APPEND:append = " -nographic"
88
89# RISC-V tunes set the BIOS, unset, and instruct QEMU to
90# ignore the BIOS and boot from -kernel
91QB_DEFAULT_BIOS:qemuriscv64 = ""
92QB_DEFAULT_BIOS:qemuriscv32 = ""
93QB_OPT_APPEND:append:qemuriscv64 = " -bios none"
94QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
95
96
97# Use the medium-any code model for the RISC-V 64 bit implementation,
98# since medlow can only access addresses below 0x80000000 and RAM
99# starts at 0x80000000 on RISC-V 64
100# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
101CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
102
103
104# This next part is necessary to trick the build system into thinking
105# its building an image recipe so it generates the qemuboot.conf
106addtask do_rootfs before do_image after do_install
107addtask do_image after do_rootfs before do_image_complete
108addtask do_image_complete after do_image before do_build
109inherit qemuboot
110
111# Based on image.bbclass to make sure we build qemu
112python(){
113 # do_addto_recipe_sysroot doesnt exist for all recipes, but we need it to have
114 # /usr/bin on recipe-sysroot (qemu) populated
115 # The do_addto_recipe_sysroot dependency is coming from EXTRA_IMAGDEPENDS now,
116 # we just need to add the logic to add its dependency to do_image.
117 def extraimage_getdepends(task):
118 deps = ""
119 for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
120 # Make sure we only add it for qemu
121 if 'qemu' in dep:
122 if ":" in dep:
123 deps += " %s " % (dep)
124 else:
125 deps += " %s:%s" % (dep, task)
126 return deps
127 d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
128}
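
A hypothetical RTOS application recipe built on this class only overrides the naming defaults noted above and installs its binaries where do_image expects them (names invented):

    inherit baremetal-image

    # Binaries installed by the recipe into ${D}${base_libdir}/firmware
    BAREMETAL_BINNAME = "my-rtos-app_${MACHINE}"
    IMAGE_LINK_NAME = "my-rtos-app-image-${MACHINE}"
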
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
deleted file mode 100644
index 8203f54519..0000000000
--- a/meta/classes/base.bbclass
+++ /dev/null
@@ -1,789 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7BB_DEFAULT_TASK ?= "build"
8CLASSOVERRIDE ?= "class-target"
9
10inherit patch
11inherit staging
12
13inherit mirrors
14inherit utils
15inherit utility-tasks
16inherit logging
17
18OE_EXTRA_IMPORTS ?= ""
19
20OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust oe.buildcfg ${OE_EXTRA_IMPORTS}"
21OE_IMPORTS[type] = "list"
22
23PACKAGECONFIG_CONFARGS ??= ""
24
25def oe_import(d):
26 import sys
27
28 bbpath = [os.path.join(dir, "lib") for dir in d.getVar("BBPATH").split(":")]
29 sys.path[0:0] = [dir for dir in bbpath if dir not in sys.path]
30
31 import oe.data
32 for toimport in oe.data.typed_value("OE_IMPORTS", d):
33 try:
34 # Make a python object accessible from the metadata
35 bb.utils._context[toimport.split(".", 1)[0]] = __import__(toimport)
36 except AttributeError as e:
37 bb.error("Error importing OE modules: %s" % str(e))
38 return ""
39
40# We need the oe module name space early (before INHERITs get added)
41OE_IMPORTED := "${@oe_import(d)}"
42
43inherit metadata_scm
44
45def lsb_distro_identifier(d):
46 adjust = d.getVar('LSB_DISTRO_ADJUST')
47 adjust_func = None
48 if adjust:
49 try:
50 adjust_func = globals()[adjust]
51 except KeyError:
52 pass
53 return oe.lsb.distro_identifier(adjust_func)
54
55die() {
56 bbfatal_log "$*"
57}
58
59oe_runmake_call() {
60 bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
61 ${MAKE} ${EXTRA_OEMAKE} "$@"
62}
63
64oe_runmake() {
65 oe_runmake_call "$@" || die "oe_runmake failed"
66}
67
68
69def get_base_dep(d):
70 if d.getVar('INHIBIT_DEFAULT_DEPS', False):
71 return ""
72 return "${BASE_DEFAULT_DEPS}"
73
74BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"
75
76BASEDEPENDS = ""
77BASEDEPENDS:class-target = "${@get_base_dep(d)}"
78BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"
79
80DEPENDS:prepend="${BASEDEPENDS} "
81
82FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
83# THISDIR only works properly with imediate expansion as it has to run
84# in the context of the location its used (:=)
85THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
86
87def extra_path_elements(d):
88 path = ""
89 elements = (d.getVar('EXTRANATIVEPATH') or "").split()
90 for e in elements:
91 path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
92 return path
93
94PATH:prepend = "${@extra_path_elements(d)}"
95
96def get_lic_checksum_file_list(d):
97 filelist = []
98 lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
99 tmpdir = d.getVar("TMPDIR")
100 s = d.getVar("S")
101 b = d.getVar("B")
102 workdir = d.getVar("WORKDIR")
103
104 urls = lic_files.split()
105 for url in urls:
106 # We only care about items that are absolute paths since
107 # any others should be covered by SRC_URI.
108 try:
109 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
110 if method != "file" or not path:
111 raise bb.fetch.MalformedUrl(url)
112
113 if path[0] == '/':
114 if path.startswith((tmpdir, s, b, workdir)):
115 continue
116 filelist.append(path + ":" + str(os.path.exists(path)))
117 except bb.fetch.MalformedUrl:
118 bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
119 return " ".join(filelist)
120
121def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
122 tools = d.getVar(toolsvar).split()
123 origbbenv = d.getVar("BB_ORIGENV", False)
124 path = origbbenv.getVar("PATH")
125 # Need to ignore our own scripts directories to avoid circular links
126 for p in path.split(":"):
127 if p.endswith("/scripts"):
128 path = path.replace(p, "/ignoreme")
129 bb.utils.mkdirhier(dest)
130 notfound = []
131 for tool in tools:
132 desttool = os.path.join(dest, tool)
133 if not os.path.exists(desttool):
134 # clean up dead symlink
135 if os.path.islink(desttool):
136 os.unlink(desttool)
137 srctool = bb.utils.which(path, tool, executable=True)
138 # gcc/g++ may link to ccache on some hosts, e.g.,
139 # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
140 # would return /usr/local/bin/ccache/gcc, but what we need is
141 # /usr/bin/gcc, this code can check and fix that.
142 if "ccache" in srctool:
143 srctool = bb.utils.which(path, tool, executable=True, direction=1)
144 if srctool:
145 os.symlink(srctool, desttool)
146 else:
147 notfound.append(tool)
148
149 if notfound and fatal:
150 bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
151
152addtask fetch
153do_fetch[dirs] = "${DL_DIR}"
154do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
155do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
156do_fetch[vardeps] += "SRCREV"
157do_fetch[network] = "1"
158python base_do_fetch() {
159
160 src_uri = (d.getVar('SRC_URI') or "").split()
161 if not src_uri:
162 return
163
164 try:
165 fetcher = bb.fetch2.Fetch(src_uri, d)
166 fetcher.download()
167 except bb.fetch2.BBFetchException as e:
168 bb.fatal("Bitbake Fetcher Error: " + repr(e))
169}
170
171addtask unpack after do_fetch
172do_unpack[dirs] = "${WORKDIR}"
173
174do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
175
176python base_do_unpack() {
177 src_uri = (d.getVar('SRC_URI') or "").split()
178 if not src_uri:
179 return
180
181 try:
182 fetcher = bb.fetch2.Fetch(src_uri, d)
183 fetcher.unpack(d.getVar('WORKDIR'))
184 except bb.fetch2.BBFetchException as e:
185 bb.fatal("Bitbake Fetcher Error: " + repr(e))
186}
187
188SSTATETASKS += "do_deploy_source_date_epoch"
189
190do_deploy_source_date_epoch () {
191 mkdir -p ${SDE_DEPLOYDIR}
192 if [ -e ${SDE_FILE} ]; then
193 echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
194 cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
195 else
196 echo "${SDE_FILE} not found!"
197 fi
198}
199
200python do_deploy_source_date_epoch_setscene () {
201 sstate_setscene(d)
202 bb.utils.mkdirhier(d.getVar('SDE_DIR'))
203 sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
204 if os.path.exists(sde_file):
205 target = d.getVar('SDE_FILE')
206 bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
207 bb.utils.rename(sde_file, target)
208 else:
209 bb.debug(1, "%s not found!" % sde_file)
210}
211
212do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
213do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
214addtask do_deploy_source_date_epoch_setscene
215addtask do_deploy_source_date_epoch before do_configure after do_patch
216
217python create_source_date_epoch_stamp() {
218 # Version: 1
219 source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
220 oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
221}
222do_unpack[postfuncs] += "create_source_date_epoch_stamp"
223
224def get_source_date_epoch_value(d):
225 return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)
226
227def get_layers_branch_rev(d):
228 revisions = oe.buildcfg.get_layer_revisions(d)
229 layers_branch_rev = ["%-20s = \"%s:%s\"" % (r[1], r[2], r[3]) for r in revisions]
230 i = len(layers_branch_rev)-1
231 p1 = layers_branch_rev[i].find("=")
232 s1 = layers_branch_rev[i][p1:]
233 while i > 0:
234 p2 = layers_branch_rev[i-1].find("=")
235 s2= layers_branch_rev[i-1][p2:]
236 if s1 == s2:
237 layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
238 i -= 1
239 else:
240 i -= 1
241 p1 = layers_branch_rev[i].find("=")
242 s1= layers_branch_rev[i][p1:]
243 return layers_branch_rev
244
245
246BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
247BUILDCFG_FUNCS[type] = "list"
248
249def buildcfg_vars(d):
250 statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
251 for var in statusvars:
252 value = d.getVar(var)
253 if value is not None:
254 yield '%-20s = "%s"' % (var, value)
255
256def buildcfg_neededvars(d):
257 needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
258 pesteruser = []
259 for v in needed_vars:
260 val = d.getVar(v)
261 if not val or val == 'INVALID':
262 pesteruser.append(v)
263
264 if pesteruser:
265 bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
266
267addhandler base_eventhandler
268base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
269python base_eventhandler() {
270 import bb.runqueue
271
272 if isinstance(e, bb.event.ConfigParsed):
273 if not d.getVar("NATIVELSBSTRING", False):
274 d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
275 d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
276 d.setVar('BB_VERSION', bb.__version__)
277
278 # There might be no bb.event.ConfigParsed event if bitbake server is
279 # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
280 # exists.
281 if isinstance(e, bb.event.ConfigParsed) or \
282 (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
283 # Works with the line in layer.conf which changes PATH to point here
284 setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
285 setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
286
287 if isinstance(e, bb.event.MultiConfigParsed):
288 # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
289 # own contexts so the variables get expanded correctly for that arch, then inject back into
290 # the main data store.
291 deps = []
292 for config in e.mcdata:
293 deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
294 deps = " ".join(deps)
295 e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)
296
297 if isinstance(e, bb.event.BuildStarted):
298 localdata = bb.data.createCopy(d)
299 statuslines = []
300 for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
301 g = globals()
302 if func not in g:
303 bb.warn("Build configuration function '%s' does not exist" % func)
304 else:
305 flines = g[func](localdata)
306 if flines:
307 statuslines.extend(flines)
308
309 statusheader = d.getVar('BUILDCFG_HEADER')
310 if statusheader:
311 bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
312
313 # This code is to silence warnings where the SDK variables overwrite the
314 # target ones and we'd see dulpicate key names overwriting each other
315 # for various PREFERRED_PROVIDERS
316 if isinstance(e, bb.event.RecipePreFinalise):
317 if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
318 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
319 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
320 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
321 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
322
323 if isinstance(e, bb.event.RecipeParsed):
324 #
325 # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
326 # skip parsing for all the other providers which will mean they get uninstalled from the
327 # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
328 # particular.
329 #
330 pn = d.getVar('PN')
331 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
332 if not source_mirror_fetch:
333 provs = (d.getVar("PROVIDES") or "").split()
334 multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
335 for p in provs:
336 if p.startswith("virtual/") and p not in multiprovidersallowed:
337 profprov = d.getVar("PREFERRED_PROVIDER_" + p)
338 if profprov and pn != profprov:
339 raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
340}
341
342CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
343CLEANBROKEN = "0"
344
345addtask configure after do_patch
346do_configure[dirs] = "${B}"
347base_do_configure() {
348 if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
349 if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
350 cd ${B}
351 if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
352 oe_runmake clean
353 fi
354 # -ignore_readdir_race does not work correctly with -delete;
355 # use xargs to avoid spurious build failures
356 find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
357 fi
358 fi
359 if [ -n "${CONFIGURESTAMPFILE}" ]; then
360 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
361 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
362 fi
363}
364
365addtask compile after do_configure
366do_compile[dirs] = "${B}"
367base_do_compile() {
368 if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
369 oe_runmake || die "make failed"
370 else
371 bbnote "nothing to compile"
372 fi
373}
374
375addtask install after do_compile
376do_install[dirs] = "${B}"
377# Remove and re-create ${D} so that is it guaranteed to be empty
378do_install[cleandirs] = "${D}"
379
380base_do_install() {
381 :
382}
383
384base_do_package() {
385 :
386}
387
388addtask build after do_populate_sysroot
389do_build[noexec] = "1"
390do_build[recrdeptask] += "do_deploy"
391do_build () {
392 :
393}
394
395def set_packagetriplet(d):
396 archs = []
397 tos = []
398 tvs = []
399
400 archs.append(d.getVar("PACKAGE_ARCHS").split())
401 tos.append(d.getVar("TARGET_OS"))
402 tvs.append(d.getVar("TARGET_VENDOR"))
403
404 def settriplet(d, varname, archs, tos, tvs):
405 triplets = []
406 for i in range(len(archs)):
407 for arch in archs[i]:
408 triplets.append(arch + tvs[i] + "-" + tos[i])
409 triplets.reverse()
410 d.setVar(varname, " ".join(triplets))
411
412 settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
413
414 variants = d.getVar("MULTILIB_VARIANTS") or ""
415 for item in variants.split():
416 localdata = bb.data.createCopy(d)
417 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
418 localdata.setVar("OVERRIDES", overrides)
419
420 archs.append(localdata.getVar("PACKAGE_ARCHS").split())
421 tos.append(localdata.getVar("TARGET_OS"))
422 tvs.append(localdata.getVar("TARGET_VENDOR"))
423
424 settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
425
426python () {
427 import string, re
428
429 # Handle backfilling
430 oe.utils.features_backfill("DISTRO_FEATURES", d)
431 oe.utils.features_backfill("MACHINE_FEATURES", d)
432
433 if d.getVar("S")[-1] == '/':
434 bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
435 if d.getVar("B")[-1] == '/':
436 bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))
437
438 if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
439 d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
440 if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
441 d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")
442
443 # To add a recipe to the skip list , set:
444 # SKIP_RECIPE[pn] = "message"
445 pn = d.getVar('PN')
446 skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
447 if skip_msg:
448 bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
449 raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))
450
451 # Handle PACKAGECONFIG
452 #
453 # These take the form:
454 #
455 # PACKAGECONFIG ??= "<default options>"
456 # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
457 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
458 if pkgconfigflags:
459 pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
460 pn = d.getVar("PN")
461
462 mlprefix = d.getVar("MLPREFIX")
463
464 def expandFilter(appends, extension, prefix):
465 appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
466 newappends = []
467 for a in appends:
468 if a.endswith("-native") or ("-cross-" in a):
469 newappends.append(a)
470 elif a.startswith("virtual/"):
471 subs = a.split("/", 1)[1]
472 if subs.startswith(prefix):
473 newappends.append(a + extension)
474 else:
475 newappends.append("virtual/" + prefix + subs + extension)
476 else:
477 if a.startswith(prefix):
478 newappends.append(a + extension)
479 else:
480 newappends.append(prefix + a + extension)
481 return newappends
482
483 def appendVar(varname, appends):
484 if not appends:
485 return
486 if varname.find("DEPENDS") != -1:
487 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
488 appends = expandFilter(appends, "", "nativesdk-")
489 elif bb.data.inherits_class('native', d):
490 appends = expandFilter(appends, "-native", "")
491 elif mlprefix:
492 appends = expandFilter(appends, "", mlprefix)
493 varname = d.expand(varname)
494 d.appendVar(varname, " " + " ".join(appends))
495
496 extradeps = []
497 extrardeps = []
498 extrarrecs = []
499 extraconf = []
500 for flag, flagval in sorted(pkgconfigflags.items()):
501 items = flagval.split(",")
502 num = len(items)
503 if num > 6:
504 bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
505 % (d.getVar('PN'), flag))
506
507 if flag in pkgconfig:
508 if num >= 3 and items[2]:
509 extradeps.append(items[2])
510 if num >= 4 and items[3]:
511 extrardeps.append(items[3])
512 if num >= 5 and items[4]:
513 extrarrecs.append(items[4])
514 if num >= 1 and items[0]:
515 extraconf.append(items[0])
516 elif num >= 2 and items[1]:
517 extraconf.append(items[1])
518
519 if num >= 6 and items[5]:
520 conflicts = set(items[5].split())
521 invalid = conflicts.difference(set(pkgconfigflags.keys()))
522 if invalid:
523 bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
524 % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))
525
526 if flag in pkgconfig:
527 intersec = conflicts.intersection(set(pkgconfig))
528 if intersec:
529 bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
530 % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
531
532 appendVar('DEPENDS', extradeps)
533 appendVar('RDEPENDS:${PN}', extrardeps)
534 appendVar('RRECOMMENDS:${PN}', extrarrecs)
535 appendVar('PACKAGECONFIG_CONFARGS', extraconf)
536
537 pn = d.getVar('PN')
538 license = d.getVar('LICENSE')
539 if license == "INVALID" and pn != "defaultpkgname":
540 bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
541
542 if bb.data.inherits_class('license', d):
543 check_license_format(d)
544 unmatched_license_flags = check_license_flags(d)
545 if unmatched_license_flags:
546 if len(unmatched_license_flags) == 1:
547 message = "because it has a restricted license '{0}'. Which is not listed in LICENSE_FLAGS_ACCEPTED".format(unmatched_license_flags[0])
548 else:
549 message = "because it has restricted licenses {0}. Which are not listed in LICENSE_FLAGS_ACCEPTED".format(
550 ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
551 bb.debug(1, "Skipping %s %s" % (pn, message))
552 raise bb.parse.SkipRecipe(message)
553
554 # If we're building a target package we need to use fakeroot (pseudo)
555 # in order to capture permissions, owners, groups and special files
556 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
557 d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
558 d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
559 d.setVarFlag('do_install', 'fakeroot', '1')
560 d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
561 d.setVarFlag('do_package', 'fakeroot', '1')
562 d.setVarFlag('do_package_setscene', 'fakeroot', '1')
563 d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
564 d.setVarFlag('do_devshell', 'fakeroot', '1')
565 d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
566
567 need_machine = d.getVar('COMPATIBLE_MACHINE')
568 if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
569 import re
570 compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
571 for m in compat_machines:
572 if re.match(need_machine, m):
573 break
574 else:
575 raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
576
577 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
578 if not source_mirror_fetch:
579 need_host = d.getVar('COMPATIBLE_HOST')
580 if need_host:
581 import re
582 this_host = d.getVar('HOST_SYS')
583 if not re.match(need_host, this_host):
584 raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
585
586 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
587
588 check_license = False if pn.startswith("nativesdk-") else True
589 for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
590 "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
591 "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
592 if pn.endswith(d.expand(t)):
593 check_license = False
594 if pn.startswith("gcc-source-"):
595 check_license = False
596
597 if check_license and bad_licenses:
598 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
599
600 exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
601
602 for lic_exception in exceptions:
603 if ":" in lic_exception:
604 lic_exception = lic_exception.split(":")[1]
605 if lic_exception in oe.license.obsolete_license_list():
606 bb.fatal("Obsolete license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)
607
608 pkgs = d.getVar('PACKAGES').split()
609 skipped_pkgs = {}
610 unskipped_pkgs = []
611 for pkg in pkgs:
612 remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
613
614 incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
615 if incompatible_lic:
616 skipped_pkgs[pkg] = incompatible_lic
617 else:
618 unskipped_pkgs.append(pkg)
619
620 if unskipped_pkgs:
621 for pkg in skipped_pkgs:
622 bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
623 d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
624 for pkg in unskipped_pkgs:
625 bb.debug(1, "Including the package %s" % pkg)
626 else:
627 incompatible_lic = incompatible_license(d, bad_licenses)
628 for pkg in skipped_pkgs:
629 incompatible_lic += skipped_pkgs[pkg]
630 incompatible_lic = sorted(list(set(incompatible_lic)))
631
632 if incompatible_lic:
633 bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
634 raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
635
636 needsrcrev = False
637 srcuri = d.getVar('SRC_URI')
638 for uri_string in srcuri.split():
639 uri = bb.fetch.URI(uri_string)
640 # Also check downloadfilename as the URL path might not be useful for sniffing
641 path = uri.params.get("downloadfilename", uri.path)
642
643 # HTTP/FTP use the wget fetcher
644 if uri.scheme in ("http", "https", "ftp"):
645 d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
646
647 # Svn packages should DEPEND on subversion-native
648 if uri.scheme == "svn":
649 needsrcrev = True
650 d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
651
652 # Git packages should DEPEND on git-native
653 elif uri.scheme in ("git", "gitsm"):
654 needsrcrev = True
655 d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
656
657 # Mercurial packages should DEPEND on mercurial-native
658 elif uri.scheme == "hg":
659 needsrcrev = True
660 d.appendVar("EXTRANATIVEPATH", ' python3-native ')
661 d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
662
663 # Perforce packages support SRCREV = "${AUTOREV}"
664 elif uri.scheme == "p4":
665 needsrcrev = True
666
667 # OSC packages should DEPEND on osc-native
668 elif uri.scheme == "osc":
669 d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
670
671 elif uri.scheme == "npm":
672 d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
673
674 elif uri.scheme == "repo":
675 needsrcrev = True
676 d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')
677
678 # *.lz4 should DEPEND on lz4-native for unpacking
679 if path.endswith('.lz4'):
680 d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
681
682 # *.zst should DEPEND on zstd-native for unpacking
683 elif path.endswith('.zst'):
684 d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')
685
686 # *.lz should DEPEND on lzip-native for unpacking
687 elif path.endswith('.lz'):
688 d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
689
690 # *.xz should DEPEND on xz-native for unpacking
691 elif path.endswith('.xz') or path.endswith('.txz'):
692 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
693
694 # .zip should DEPEND on unzip-native for unpacking
695 elif path.endswith('.zip') or path.endswith('.jar'):
696 d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
697
698 # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
699 elif path.endswith('.rpm'):
700 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
701
702 # *.deb should DEPEND on xz-native for unpacking
703 elif path.endswith('.deb'):
704 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
705
706 if needsrcrev:
707 d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
708
709 # Gather all named SRCREVs to add to the sstate hash calculation
710 # This anonymous python snippet is called multiple times so we
711 # need to be careful to not double up the appends here and cause
712 # the base hash to mismatch the task hash
713 for uri in srcuri.split():
714 parm = bb.fetch.decodeurl(uri)[5]
715 uri_names = parm.get("name", "").split(",")
716 for uri_name in filter(None, uri_names):
717 srcrev_name = "SRCREV_{}".format(uri_name)
718 if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
719 d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
720
721 set_packagetriplet(d)
722
723 # 'multimachine' handling
724 mach_arch = d.getVar('MACHINE_ARCH')
725 pkg_arch = d.getVar('PACKAGE_ARCH')
726
727 if (pkg_arch == mach_arch):
728 # Already machine specific - nothing further to do
729 return
730
731 #
732 # We always try to scan SRC_URI for urls with machine overrides
733 # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
734 #
735 override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
736 if override != '0':
737 paths = []
738 fpaths = (d.getVar('FILESPATH') or '').split(':')
739 machine = d.getVar('MACHINE')
740 for p in fpaths:
741 if os.path.basename(p) == machine and os.path.isdir(p):
742 paths.append(p)
743
744 if paths:
745 for s in srcuri.split():
746 if not s.startswith("file://"):
747 continue
748 fetcher = bb.fetch2.Fetch([s], d)
749 local = fetcher.localpath(s)
750 for mp in paths:
751 if local.startswith(mp):
752 #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
753 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
754 return
755
756 packages = d.getVar('PACKAGES').split()
757 for pkg in packages:
758 pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)
759
760 # We could look for != PACKAGE_ARCH here but how to choose
761 # if multiple differences are present?
762 # Look through PACKAGE_ARCHS for the priority order?
763 if pkgarch and pkgarch == mach_arch:
764 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
765 bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
766}
767
768addtask cleansstate after do_clean
769python do_cleansstate() {
770 sstate_clean_cachefiles(d)
771}
772addtask cleanall after do_cleansstate
773do_cleansstate[nostamp] = "1"
774
775python do_cleanall() {
776 src_uri = (d.getVar('SRC_URI') or "").split()
777 if not src_uri:
778 return
779
780 try:
781 fetcher = bb.fetch2.Fetch(src_uri, d)
782 fetcher.clean()
783 except bb.fetch2.BBFetchException as e:
784 bb.fatal(str(e))
785}
786do_cleanall[nostamp] = "1"
787
788
789EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
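
To make the PACKAGECONFIG handling in the anonymous python block above concrete, a hypothetical recipe fragment (feature names and dependencies invented) supplies flags in the documented comma-separated form; the first two fields feed PACKAGECONFIG_CONFARGS, the third DEPENDS, and the fourth RDEPENDS:${PN}:

    PACKAGECONFIG ??= "tls"
    PACKAGECONFIG[tls] = "--with-tls,--without-tls,gnutls,gnutls"
    PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native"
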
diff --git a/meta/classes/bash-completion.bbclass b/meta/classes/bash-completion.bbclass
deleted file mode 100644
index b656e76c09..0000000000
--- a/meta/classes/bash-completion.bbclass
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-DEPENDS:append:class-target = " bash-completion"
-
-PACKAGES += "${PN}-bash-completion"
-
-FILES:${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
-
-RDEPENDS:${PN}-bash-completion = "bash-completion"
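
Usage sketch: a recipe whose build installs completion files under ${datadir}/bash-completion only needs the inherit; the split ${PN}-bash-completion package is then created from the FILES assignment above.

    inherit bash-completion
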
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
deleted file mode 100644
index 3a1befc29c..0000000000
--- a/meta/classes/bin_package.bbclass
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-# Common variables and tasks for binary package recipes.
-# Basic principle:
-# * The files have been unpacked to ${S} by base.bbclass
-# * Skip do_configure and do_compile
-# * Use do_install to install the files to ${D}
-#
-# Note:
-# The "subdir" parameter in the SRC_URI is useful when the input package
-# is rpm, ipk, deb and so on, for example:
-#
-# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
-#
-# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
-# they would be in ${WORKDIR}.
-#
-
-# Skip the unwanted steps
-do_configure[noexec] = "1"
-do_compile[noexec] = "1"
-
-# Install the files to ${D}
-bin_package_do_install () {
-    # Do it carefully
-    [ -d "${S}" ] || exit 1
-    if [ -z "$(ls -A ${S})" ]; then
-        bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
-    fi
-    cd ${S}
-    install -d ${D}${base_prefix}
-    tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
-        | tar --no-same-owner -xpf - -C ${D}${base_prefix}
-}
-
-FILES:${PN} = "/"
-
-EXPORT_FUNCTIONS do_install
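
A hypothetical recipe repackaging a prebuilt archive would look like the following sketch, reusing the subdir trick from the comment above (summary and QA skip are illustrative):

    SUMMARY = "Prebuilt vendor tool (example)"
    LICENSE = "CLOSED"

    SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"

    inherit bin_package

    # Prebuilt binaries are usually already stripped; silence that QA check
    INSANE_SKIP:${PN} = "already-stripped"
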
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
deleted file mode 100644
index cbe2078e0f..0000000000
--- a/meta/classes/binconfig-disabled.bbclass
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-#
-# Class to disable binconfig files instead of installing them
-#
-
-# The list of scripts which should be disabled.
-BINCONFIG ?= ""
-
-FILES:${PN}-dev += "${bindir}/*-config"
-
-do_install:append () {
-    for x in ${BINCONFIG}; do
-        # Make the disabled script emit invalid parameters for those configure
-        # scripts which call it without checking the return code.
-        echo "#!/bin/sh" > ${D}$x
-        echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
-        echo "echo '--should-not-have-used-$x'" >> ${D}$x
-        echo "exit 1" >> ${D}$x
-        chmod +x ${D}$x
-    done
-}
-
-SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
-
-binconfig_disabled_sysroot_preprocess () {
-    for x in ${BINCONFIG}; do
-        configname=`basename $x`
-        install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
-        install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
-    done
-}
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
deleted file mode 100644
index 427dba7f1f..0000000000
--- a/meta/classes/binconfig.bbclass
+++ /dev/null
@@ -1,60 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7FILES:${PN}-dev += "${bindir}/*-config"
8
9# The namespaces can clash here, hence the two-step replace
10def get_binconfig_mangle(d):
11 s = "-e ''"
12 if not bb.data.inherits_class('native', d):
13 optional_quote = r"\(\"\?\)"
14 s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
15 s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
16 s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
17 s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
18 s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
19 s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
20 s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
21 s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
22 s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
23 s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
24 s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
25 s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
26 s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
27 s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
28 s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
29 s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
30 if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
31 s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")
32
33 return s
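
# For illustration (assuming default target paths): "-L${libdir}" in a
# foo-config script is first rewritten to "-LOELIBDIR" and then, in a
# second pass, to "-L${STAGING_LIBDIR}", so the two namespaces never
# clash mid-substitution.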
34
35BINCONFIG_GLOB ?= "*-config"
36
37PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
38
39binconfig_package_preprocess () {
40 for config in `find ${PKGD} -type f -name '${BINCONFIG_GLOB}'`; do
41 sed -i \
42 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
43 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
44 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
45 -e 's:${STAGING_DATADIR}:${datadir}:' \
46 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
47 $config
48 done
49}
50
51SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
52
53binconfig_sysroot_preprocess () {
54 for config in `find ${S} -type f -name '${BINCONFIG_GLOB}'` `find ${B} -type f -name '${BINCONFIG_GLOB}'`; do
55 configname=`basename $config`
56 install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
57 sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
58 chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
59 done
60}
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
deleted file mode 100644
index f49a67aa4f..0000000000
--- a/meta/classes/buildstats.bbclass
+++ /dev/null
@@ -1,302 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
8
9################################################################################
10# Build statistics gathering.
11#
12# The CPU and Time gathering/tracking functions and bbevent inspiration
13# were written by Christopher Larson.
14#
15################################################################################
16
17def get_buildprocess_cputime(pid):
18 with open("/proc/%d/stat" % pid, "r") as f:
19 fields = f.readline().rstrip().split()
20 # 13: utime, 14: stime, 15: cutime, 16: cstime
21    return sum(int(field) for field in fields[13:17])
22
23def get_process_cputime(pid):
24 import resource
25 with open("/proc/%d/stat" % pid, "r") as f:
26 fields = f.readline().rstrip().split()
27 stats = {
28 'utime' : fields[13],
29 'stime' : fields[14],
30 'cutime' : fields[15],
31 'cstime' : fields[16],
32 }
33 iostats = {}
34 if os.path.isfile("/proc/%d/io" % pid):
35 with open("/proc/%d/io" % pid, "r") as f:
36 while True:
37 i = f.readline().strip()
38 if not i:
39 break
40 if not ":" in i:
41 # one more extra line is appended (empty or containing "0")
42                # most probably due to a race condition in the kernel
43                # while updating IO stats
44 break
45 i = i.split(": ")
46 iostats[i[0]] = i[1]
47 resources = resource.getrusage(resource.RUSAGE_SELF)
48 childres = resource.getrusage(resource.RUSAGE_CHILDREN)
49 return stats, iostats, resources, childres
50
51def get_cputime():
52 with open("/proc/stat", "r") as f:
53 fields = f.readline().rstrip().split()[1:]
54 return sum(int(field) for field in fields)
55
56def set_timedata(var, d, server_time):
57 d.setVar(var, server_time)
58
59def get_timedata(var, d, end_time):
60 oldtime = d.getVar(var, False)
61 if oldtime is None:
62 return
63 return end_time - oldtime
64
65def set_buildtimedata(var, d):
66 import time
67 time = time.time()
68 cputime = get_cputime()
69 proctime = get_buildprocess_cputime(os.getpid())
70 d.setVar(var, (time, cputime, proctime))
71
72def get_buildtimedata(var, d):
73 import time
74 timedata = d.getVar(var, False)
75 if timedata is None:
76 return
77 oldtime, oldcpu, oldproc = timedata
78 procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
79 cpudiff = get_cputime() - oldcpu
80 end_time = time.time()
81 timediff = end_time - oldtime
82 if cpudiff > 0:
83 cpuperc = float(procdiff) * 100 / cpudiff
84 else:
85 cpuperc = None
86 return timediff, cpuperc
87
88def write_task_data(status, logfile, e, d):
89 with open(os.path.join(logfile), "a") as f:
90 elapsedtime = get_timedata("__timedata_task", d, e.time)
91 if elapsedtime:
92 f.write(d.expand("${PF}: %s\n" % e.task))
93 f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
94 cpu, iostats, resources, childres = get_process_cputime(os.getpid())
95 if cpu:
96 f.write("utime: %s\n" % cpu['utime'])
97 f.write("stime: %s\n" % cpu['stime'])
98 f.write("cutime: %s\n" % cpu['cutime'])
99 f.write("cstime: %s\n" % cpu['cstime'])
100 for i in iostats:
101 f.write("IO %s: %s\n" % (i, iostats[i]))
102 rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
103 for i in rusages:
104 f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
105 for i in rusages:
106 f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
107 if status == "passed":
108 f.write("Status: PASSED \n")
109 else:
110 f.write("Status: FAILED \n")
111 f.write("Ended: %0.2f \n" % e.time)
112
113def write_host_data(logfile, e, d, type):
114 import subprocess, os, datetime
115 # minimum time allowed for each command to run, in seconds
116 time_threshold = 0.5
117 limit = 10
118 # the total number of commands
119 num_cmds = 0
120 msg = ""
121 if type == "interval":
122 # interval at which data will be logged
123 interval = d.getVar("BB_HEARTBEAT_EVENT", False)
124 if interval is None:
125 bb.warn("buildstats: Collecting host data at intervals failed. Set BB_HEARTBEAT_EVENT=\"<interval>\" in conf/local.conf for the interval at which host data will be logged.")
126 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
127 return
128 interval = int(interval)
129 cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_INTERVAL')
130 msg = "Host Stats: Collecting data at %d second intervals.\n" % interval
131 if cmds is None:
132 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
133 bb.warn("buildstats: Collecting host data at intervals failed. Set BB_LOG_HOST_STAT_CMDS_INTERVAL=\"command1 ; command2 ; ... \" in conf/local.conf.")
134 return
135 if type == "failure":
136 cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_FAILURE')
137 msg = "Host Stats: Collecting data on failure.\n"
138 msg += "Failed at task: " + e.task + "\n"
139 if cmds is None:
140 d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
141 bb.warn("buildstats: Collecting host data on failure failed. Set BB_LOG_HOST_STAT_CMDS_FAILURE=\"command1 ; command2 ; ... \" in conf/local.conf.")
142 return
143 c_san = []
144 for cmd in cmds.split(";"):
145 if len(cmd) == 0:
146 continue
147 num_cmds += 1
148 c_san.append(cmd)
149 if num_cmds == 0:
150 if type == "interval":
151 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
152 if type == "failure":
153 d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
154 return
155
156 # return if the interval is not enough to run all commands within the specified BB_HEARTBEAT_EVENT interval
157 if type == "interval":
158 limit = interval / num_cmds
159 if limit <= time_threshold:
160 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
161 bb.warn("buildstats: Collecting host data failed. BB_HEARTBEAT_EVENT interval not enough to run the specified commands. Increase value of BB_HEARTBEAT_EVENT in conf/local.conf.")
162 return
163
164 # set the environment variables
165 path = d.getVar("PATH")
166 opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
167 ospath = os.environ['PATH']
168 os.environ['PATH'] = path + ":" + opath + ":" + ospath
169 with open(logfile, "a") as f:
170 f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
171 f.write("%s" % msg)
172 for c in c_san:
173 try:
174 output = subprocess.check_output(c.split(), stderr=subprocess.STDOUT, timeout=limit).decode('utf-8')
175 except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
176 output = "Error running command: %s\n%s\n" % (c, err)
177 f.write("%s\n%s\n" % (c, output))
178 # reset the environment
179 os.environ['PATH'] = ospath
180
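# Illustrative local.conf settings for interval-based host data collection
# (the command list is just an example):
#   BB_HEARTBEAT_EVENT = "60"
#   BB_LOG_HOST_STAT_ON_INTERVAL = "1"
#   BB_LOG_HOST_STAT_CMDS_INTERVAL = "uptime ; free -m"
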
181python run_buildstats () {
182 import bb.build
183 import bb.event
184 import time, subprocess, platform
185
186 bn = d.getVar('BUILDNAME')
187 ########################################################################
188 # bitbake fires HeartbeatEvent even before a build has been
189 # triggered, causing BUILDNAME to be None
190 ########################################################################
191 if bn is not None:
192 bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
193 taskdir = os.path.join(bsdir, d.getVar('PF'))
194 if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
195 bb.utils.mkdirhier(bsdir)
196 write_host_data(os.path.join(bsdir, "host_stats_interval"), e, d, "interval")
197
198 if isinstance(e, bb.event.BuildStarted):
199 ########################################################################
200 # If the kernel was not configured to provide I/O statistics, issue
201 # a one time warning.
202 ########################################################################
203 if not os.path.isfile("/proc/%d/io" % os.getpid()):
204 bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")
205
206 ########################################################################
207 # at first pass make the buildstats hierarchy and then
208 # set the buildname
209 ########################################################################
210 bb.utils.mkdirhier(bsdir)
211 set_buildtimedata("__timedata_build", d)
212 build_time = os.path.join(bsdir, "build_stats")
213 # write start of build into build_time
214 with open(build_time, "a") as f:
215 host_info = platform.uname()
216 f.write("Host Info: ")
217 for x in host_info:
218 if x:
219 f.write(x + " ")
220 f.write("\n")
221 f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])
222
223 elif isinstance(e, bb.event.BuildCompleted):
224 build_time = os.path.join(bsdir, "build_stats")
225 with open(build_time, "a") as f:
226 ########################################################################
227 # Write build statistics for the build
228 ########################################################################
229 timedata = get_buildtimedata("__timedata_build", d)
230 if timedata:
231 time, cpu = timedata
232 # write end of build and cpu used into build_time
233 f.write("Elapsed time: %0.2f seconds \n" % (time))
234 if cpu:
235 f.write("CPU usage: %0.1f%% \n" % cpu)
236
237 if isinstance(e, bb.build.TaskStarted):
238 set_timedata("__timedata_task", d, e.time)
239 bb.utils.mkdirhier(taskdir)
240 # write into the task event file the name and start time
241 with open(os.path.join(taskdir, e.task), "a") as f:
242 f.write("Event: %s \n" % bb.event.getName(e))
243 f.write("Started: %0.2f \n" % e.time)
244
245 elif isinstance(e, bb.build.TaskSucceeded):
246 write_task_data("passed", os.path.join(taskdir, e.task), e, d)
247 if e.task == "do_rootfs":
248 bs = os.path.join(bsdir, "build_stats")
249 with open(bs, "a") as f:
250 rootfs = d.getVar('IMAGE_ROOTFS')
251 if os.path.isdir(rootfs):
252 try:
253 rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
254 stderr=subprocess.STDOUT).decode('utf-8')
255 f.write("Uncompressed Rootfs size: %s" % rootfs_size)
256 except subprocess.CalledProcessError as err:
257 bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
258
259 elif isinstance(e, bb.build.TaskFailed):
260 # Can have a failure before TaskStarted so need to mkdir here too
261 bb.utils.mkdirhier(taskdir)
262 write_task_data("failed", os.path.join(taskdir, e.task), e, d)
263 ########################################################################
264 # Lets make things easier and tell people where the build failed in
265 # build_status. We do this here because BuildCompleted triggers no
266 # matter what the status of the build actually is
267 ########################################################################
268 build_status = os.path.join(bsdir, "build_stats")
269 with open(build_status, "a") as f:
270 f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
271 if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
272 write_host_data(os.path.join(bsdir, "host_stats_%s_failure" % e.task), e, d, "failure")
273}
274
275addhandler run_buildstats
276run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
277
278python runqueue_stats () {
279 import buildstats
280 from bb import event, runqueue
281 # We should not record any samples before the first task has started,
282 # because that's the first activity shown in the process chart.
283 # Besides, at that point we are sure that the build variables
284 # are available that we need to find the output directory.
285 # The persistent SystemStats is stored in the datastore and
286 # closed when the build is done.
287 system_stats = d.getVar('_buildstats_system_stats', False)
288 if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
289 system_stats = buildstats.SystemStats(d)
290 d.setVar('_buildstats_system_stats', system_stats)
291 if system_stats:
292 # Ensure that we sample at important events.
293 done = isinstance(e, bb.event.BuildCompleted)
294 if system_stats.sample(e, force=done):
295 d.setVar('_buildstats_system_stats', system_stats)
296 if done:
297 system_stats.close()
298 d.delVar('_buildstats_system_stats')
299}
300
301addhandler runqueue_stats
302runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
diff --git a/meta/classes/cargo.bbclass b/meta/classes/cargo.bbclass
deleted file mode 100644
index d1e83518b5..0000000000
--- a/meta/classes/cargo.bbclass
+++ /dev/null
@@ -1,97 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7##
8## Purpose:
9## This class is used by any recipes that are built using
10## Cargo.
11
12inherit cargo_common
13inherit rust-target-config
14
15# the binary we will use
16CARGO = "cargo"
17
18# We need cargo to compile for the target
19BASEDEPENDS:append = " cargo-native"
20
21# Ensure we get the right rust variant
22DEPENDS:append:class-target = " rust-native ${RUSTLIB_DEP}"
23DEPENDS:append:class-nativesdk = " rust-native ${RUSTLIB_DEP}"
24DEPENDS:append:class-native = " rust-native"
25
26# Enable build separation
27B = "${WORKDIR}/build"
28
29# In case something fails in the build process, give a bit more feedback on
30# where the issue occurred
31export RUST_BACKTRACE = "1"
32
33# The directory of the Cargo.toml relative to the root directory; by default,
34# assume there's a Cargo.toml directly in the root directory
35CARGO_SRC_DIR ??= ""
36
37# The actual path to the Cargo.toml
38MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
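# e.g. (hypothetical layout) a recipe whose manifest lives in <S>/rust/Cargo.toml
# would set CARGO_SRC_DIR = "rust"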
39
40RUSTFLAGS ??= ""
41BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
42CARGO_BUILD_FLAGS = "-v --target ${RUST_HOST_SYS} ${BUILD_MODE} --manifest-path=${MANIFEST_PATH}"
43
44# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
45# change if CARGO_BUILD_FLAGS changes.
46BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
47CARGO_TARGET_SUBDIR="${RUST_HOST_SYS}/${BUILD_DIR}"
48oe_cargo_build () {
49 export RUSTFLAGS="${RUSTFLAGS}"
50 bbnote "Using rust targets from ${RUST_TARGET_PATH}"
51 bbnote "cargo = $(which ${CARGO})"
52 bbnote "rustc = $(which ${RUSTC})"
53 bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@"
54 "${CARGO}" build ${CARGO_BUILD_FLAGS} "$@"
55}
56
57do_compile[progress] = "outof:\s+(\d+)/(\d+)"
58cargo_do_compile () {
59 oe_cargo_fix_env
60 oe_cargo_build
61}
62
63cargo_do_install () {
64 local have_installed=false
65 for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
66 case $tgt in
67 *.so|*.rlib)
68 install -d "${D}${rustlibdir}"
69 install -m755 "$tgt" "${D}${rustlibdir}"
70 have_installed=true
71 ;;
72 *examples)
73 if [ -d "$tgt" ]; then
74 for example in "$tgt/"*; do
75 if [ -f "$example" ] && [ -x "$example" ]; then
76 install -d "${D}${bindir}"
77 install -m755 "$example" "${D}${bindir}"
78 have_installed=true
79 fi
80 done
81 fi
82 ;;
83 *)
84 if [ -f "$tgt" ] && [ -x "$tgt" ]; then
85 install -d "${D}${bindir}"
86 install -m755 "$tgt" "${D}${bindir}"
87 have_installed=true
88 fi
89 ;;
90 esac
91 done
92 if ! $have_installed; then
93 die "Did not find anything to install"
94 fi
95}
96
97EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/cargo_common.bbclass b/meta/classes/cargo_common.bbclass
deleted file mode 100644
index eec7710a4c..0000000000
--- a/meta/classes/cargo_common.bbclass
+++ /dev/null
@@ -1,139 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7##
8## Purpose:
9## This class supports building with cargo. It
10## must be different from cargo.bbclass because Rust
11## itself builds with Cargo but cannot use cargo.bbclass
12## due to dependencies and assumptions in cargo.bbclass
13## that Rust & Cargo are already installed. So this
14## is used by both cargo.bbclass and Rust.
15##
16
17# add crate fetch support
18inherit rust-common
19
20# Where we download our registry and dependencies to
21export CARGO_HOME = "${WORKDIR}/cargo_home"
22
23# The pkg-config-rs library used by cargo build scripts disables itself when
24# cross compiling unless this is defined. We set up pkg-config appropriately
25# for cross compilation, so tell it we know better than it.
26export PKG_CONFIG_ALLOW_CROSS = "1"
27
28# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
29# for example the rust compiler itself, come with their own vendored sources.
30# Specifying two [source.crates-io] will not work.
31CARGO_DISABLE_BITBAKE_VENDORING ?= "0"
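# e.g. a recipe shipping its own vendored crates would set (illustrative):
#   CARGO_DISABLE_BITBAKE_VENDORING = "1"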
32
33# Used by libstd-rs to point to the vendor dir included in rustc src
34CARGO_VENDORING_DIRECTORY ?= "${CARGO_HOME}/bitbake"
35
36CARGO_RUST_TARGET_CCLD ?= "${RUST_TARGET_CCLD}"
37cargo_common_do_configure () {
38 mkdir -p ${CARGO_HOME}/bitbake
39
40 cat <<- EOF > ${CARGO_HOME}/config
41 # EXTRA_OECARGO_PATHS
42 paths = [
43 $(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
44 ]
45 EOF
46
47 cat <<- EOF >> ${CARGO_HOME}/config
48
49 # Local mirror vendored by bitbake
50 [source.bitbake]
51 directory = "${CARGO_VENDORING_DIRECTORY}"
52 EOF
53
54 if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
55 cat <<- EOF >> ${CARGO_HOME}/config
56
57 [source.crates-io]
58 replace-with = "bitbake"
59 local-registry = "/nonexistant"
60 EOF
61 fi
62
63 cat <<- EOF >> ${CARGO_HOME}/config
64
65 [http]
66 # Multiplexing can't be enabled because http2 can't be enabled
67 # in curl-native without dependency loops
68 multiplexing = false
69
70 # Ignore the hard coded and incorrect path to certificates
71 cainfo = "${STAGING_ETCDIR_NATIVE}/ssl/certs/ca-certificates.crt"
72
73 EOF
74
75 cat <<- EOF >> ${CARGO_HOME}/config
76
77 # HOST_SYS
78 [target.${RUST_HOST_SYS}]
79 linker = "${CARGO_RUST_TARGET_CCLD}"
80 EOF
81
82 if [ "${RUST_HOST_SYS}" != "${RUST_BUILD_SYS}" ]; then
83 cat <<- EOF >> ${CARGO_HOME}/config
84
85 # BUILD_SYS
86 [target.${RUST_BUILD_SYS}]
87 linker = "${RUST_BUILD_CCLD}"
88 EOF
89 fi
90
91	if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}" ]; then
92 cat <<- EOF >> ${CARGO_HOME}/config
93
94 # TARGET_SYS
95 [target.${RUST_TARGET_SYS}]
96 linker = "${RUST_TARGET_CCLD}"
97 EOF
98 fi
99
100 # Put build output in build directory preferred by bitbake instead of
101 # inside source directory unless they are the same
102 if [ "${B}" != "${S}" ]; then
103 cat <<- EOF >> ${CARGO_HOME}/config
104
105 [build]
106		# Use out of tree build destination to avoid polluting the source tree
107 target-dir = "${B}/target"
108 EOF
109 fi
110
111 cat <<- EOF >> ${CARGO_HOME}/config
112
113 [term]
114 progress.when = 'always'
115 progress.width = 80
116 EOF
117}
118
119oe_cargo_fix_env () {
120 export CC="${RUST_TARGET_CC}"
121 export CXX="${RUST_TARGET_CXX}"
122 export CFLAGS="${CFLAGS}"
123 export CXXFLAGS="${CXXFLAGS}"
124 export AR="${AR}"
125 export TARGET_CC="${RUST_TARGET_CC}"
126 export TARGET_CXX="${RUST_TARGET_CXX}"
127 export TARGET_CFLAGS="${CFLAGS}"
128 export TARGET_CXXFLAGS="${CXXFLAGS}"
129 export TARGET_AR="${AR}"
130 export HOST_CC="${RUST_BUILD_CC}"
131 export HOST_CXX="${RUST_BUILD_CXX}"
132 export HOST_CFLAGS="${BUILD_CFLAGS}"
133 export HOST_CXXFLAGS="${BUILD_CXXFLAGS}"
134 export HOST_AR="${BUILD_AR}"
135}
136
137EXTRA_OECARGO_PATHS ??= ""
138
139EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
deleted file mode 100644
index 554b948c32..0000000000
--- a/meta/classes/cmake.bbclass
+++ /dev/null
@@ -1,223 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Path to the CMake file to process.
8OECMAKE_SOURCEPATH ??= "${S}"
9
10DEPENDS:prepend = "cmake-native "
11B = "${WORKDIR}/build"
12
13# What CMake generator to use.
14# The supported options are "Unix Makefiles" or "Ninja".
15OECMAKE_GENERATOR ?= "Ninja"
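# e.g. a recipe whose custom build steps assume make could override this
# (illustrative):
#   OECMAKE_GENERATOR = "Unix Makefiles"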
16
17python() {
18 generator = d.getVar("OECMAKE_GENERATOR")
19 if "Unix Makefiles" in generator:
20 args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
21 d.setVar("OECMAKE_GENERATOR_ARGS", args)
22 d.setVarFlag("do_compile", "progress", "percent")
23 elif "Ninja" in generator:
24 args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
25 d.appendVar("DEPENDS", " ninja-native")
26 d.setVar("OECMAKE_GENERATOR_ARGS", args)
27 d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
28 else:
29 bb.fatal("Unknown CMake Generator %s" % generator)
30}
31OECMAKE_AR ?= "${AR}"
32
33# Compiler flags
34OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
35OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
36OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
37OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
38OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
39OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
40
41def oecmake_map_compiler(compiler, d):
42 args = d.getVar(compiler).split()
43 if args[0] == "ccache":
44 return args[1], args[0]
45 return args[0], ""
46
47# C/C++ Compiler (without cpu arch/tune arguments)
48OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}"
49OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
50OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
51OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
52
53# clear compiler vars for allarch to avoid sig hash difference
54OECMAKE_C_COMPILER_allarch = ""
55OECMAKE_C_COMPILER_LAUNCHER_allarch = ""
56OECMAKE_CXX_COMPILER_allarch = ""
57OECMAKE_CXX_COMPILER_LAUNCHER_allarch = ""
58
59OECMAKE_RPATH ?= ""
60OECMAKE_PERLNATIVE_DIR ??= ""
61OECMAKE_EXTRA_ROOT_PATH ?= ""
62
63OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
64OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM:class-native = "BOTH"
65
66EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
67
68export CMAKE_BUILD_PARALLEL_LEVEL
69CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}"
70CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}"
71
72OECMAKE_TARGET_COMPILE ?= "all"
73OECMAKE_TARGET_INSTALL ?= "install"
74
75def map_host_os_to_system_name(host_os):
76 if host_os.startswith('mingw'):
77 return 'Windows'
78 if host_os.startswith('linux'):
79 return 'Linux'
80 return host_os
81
82# CMake expects target architectures in the format of uname(2),
83# which do not always match TARGET_ARCH, so all the necessary
84# conversions should happen here.
85def map_host_arch_to_uname_arch(host_arch):
86 if host_arch == "powerpc":
87 return "ppc"
88 if host_arch == "powerpc64le":
89 return "ppc64le"
90 if host_arch == "powerpc64":
91 return "ppc64"
92 return host_arch
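# e.g. map_host_arch_to_uname_arch("powerpc64le") returns "ppc64le";
# unlisted values such as "aarch64" pass through unchanged.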
93
94cmake_do_generate_toolchain_file() {
95 if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
96 cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
97 fi
98 cat > ${WORKDIR}/toolchain.cmake <<EOF
99# CMake system name must be something like "Linux".
100# This is important for cross-compiling.
101$cmake_crosscompiling
102set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
103set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
104set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
105set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
106set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
107set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
108set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
109find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED )
110
111set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
112set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
113set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
114set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
115set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
116set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
117set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
118set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
119
120# only search in the paths provided so cmake doesn't pick
121# up libraries and tools from the native build machine
122set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
123set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
124set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
125set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
126set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
127set( CMAKE_PROGRAM_PATH "/" )
128
129# Use qt.conf settings
130set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
131
132# We need to set the rpath to the correct directory as cmake does not provide any
133# directory as rpath by default
134set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
135
136# Use RPATHs relative to build directory for reproducibility
137set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
138
139# Use our cmake modules
140list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
141
142# add for non /usr/lib libdir, e.g. /usr/lib64
143set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
144
145# add include dir to implicit includes in case it differs from /usr/include
146list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
147list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
148
149EOF
150}
151
152addtask generate_toolchain_file after do_patch before do_configure
153
154CONFIGURE_FILES = "CMakeLists.txt"
155
156do_configure[cleandirs] = "${@d.getVar('B') if d.getVar('S') != d.getVar('B') else ''}"
157
158cmake_do_configure() {
159 if [ "${OECMAKE_BUILDPATH}" ]; then
160 bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
161 fi
162
163 if [ "${S}" = "${B}" ]; then
164 find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
165 fi
166
167 # Just like autotools cmake can use a site file to cache result that need generated binaries to run
168 if [ -e ${WORKDIR}/site-file.cmake ] ; then
169 oecmake_sitefile="-C ${WORKDIR}/site-file.cmake"
170 else
171 oecmake_sitefile=
172 fi
173
174 cmake \
175 ${OECMAKE_GENERATOR_ARGS} \
176 $oecmake_sitefile \
177 ${OECMAKE_SOURCEPATH} \
178 -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
179 -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
180 -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
181 -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
182 -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
183	  -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \
184 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
185 -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
186 -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
187 -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
188 -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
189 -DPython_EXECUTABLE:PATH=${PYTHON} \
190 -DPython3_EXECUTABLE:PATH=${PYTHON} \
191 -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
192 -DCMAKE_INSTALL_SO_NO_EXE=0 \
193 -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
194 -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
195 -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
196 -DFETCHCONTENT_FULLY_DISCONNECTED=ON \
197 ${EXTRA_OECMAKE} \
198 -Wno-dev
199}
200
201# To disable verbose cmake logs for a given recipe, or globally via config metadata
202# (e.g. local.conf), add the following:
203#
204# CMAKE_VERBOSE = ""
205#
206
207CMAKE_VERBOSE ??= "VERBOSE=1"
208
209# Then run do_compile again
210cmake_runcmake_build() {
211 bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
212 eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
213}
214
215cmake_do_compile() {
216 cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
217}
218
219cmake_do_install() {
220 DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
221}
222
223EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
deleted file mode 100644
index b79091383d..0000000000
--- a/meta/classes/cml1.bbclass
+++ /dev/null
@@ -1,107 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# returns all the elements from the src uri that are .cfg files
8def find_cfgs(d):
9 sources=src_patches(d, True)
10 sources_list=[]
11 for s in sources:
12 if s.endswith('.cfg'):
13 sources_list.append(s)
14
15 return sources_list
16
17cml1_do_configure() {
18 set -e
19 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
20 yes '' | oe_runmake oldconfig
21}
22
23EXPORT_FUNCTIONS do_configure
24addtask configure after do_unpack do_patch before do_compile
25
26inherit terminal
27
28OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
29HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
30HOSTLDFLAGS = "${BUILD_LDFLAGS}"
31CROSS_CURSES_LIB = "-lncurses -ltinfo"
32CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
33TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
34
35KCONFIG_CONFIG_COMMAND ??= "menuconfig"
36KCONFIG_CONFIG_ROOTDIR ??= "${B}"
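# e.g. a recipe could select a different kconfig frontend (illustrative):
#   KCONFIG_CONFIG_COMMAND = "nconfig"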
37python do_menuconfig() {
38 import shutil
39
40 config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
41 configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
42
43 try:
44 mtime = os.path.getmtime(config)
45 shutil.copy(config, configorig)
46 except OSError:
47 mtime = 0
48
49    # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
50 d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
51 d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
52 d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
53 d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
54    # ensure that environment variables are overwritten with this task's 'd' values
55 d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
56
57 oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
58 d.getVar('PN') + ' Configuration', d)
59
60 # FIXME this check can be removed when the minimum bitbake version has been bumped
61 if hasattr(bb.build, 'write_taint'):
62 try:
63 newmtime = os.path.getmtime(config)
64 except OSError:
65 newmtime = 0
66
67 if newmtime > mtime:
68 bb.note("Configuration changed, recompile will be forced")
69 bb.build.write_taint('do_compile', d)
70}
71do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
72do_menuconfig[nostamp] = "1"
73do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
74addtask menuconfig after do_configure
75
76python do_diffconfig() {
77 import shutil
78 import subprocess
79
80 workdir = d.getVar('WORKDIR')
81 fragment = workdir + '/fragment.cfg'
82 configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
83 config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
84
85 try:
86 md5newconfig = bb.utils.md5_file(configorig)
87 md5config = bb.utils.md5_file(config)
88 isdiff = md5newconfig != md5config
89 except IOError as e:
90        bb.fatal("No config files found. Did you run menuconfig?\n%s" % e)
91
92 if isdiff:
93 statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
94 subprocess.call(statement, shell=True)
95        # No need to check the exit code: diff returns non-zero when the
96        # files differ, which is exactly what we expect here.
97 shutil.copy(configorig, config)
98
99 bb.plain("Config fragment has been dumped into:\n %s" % fragment)
100 else:
101 if os.path.exists(fragment):
102 os.unlink(fragment)
103}
104
105do_diffconfig[nostamp] = "1"
106do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
107addtask diffconfig
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
deleted file mode 100644
index d603caf858..0000000000
--- a/meta/classes/compress_doc.bbclass
+++ /dev/null
@@ -1,269 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Compress man pages in ${mandir} and info pages in ${infodir}
8#
9# 1. The doc will be compressed to gz format by default.
10#
11# 2. Docs already compressed in a format that is in ${DOC_COMPRESS_LIST}
12#    but differs from ${DOC_COMPRESS} are automatically recompressed to
13#    the ${DOC_COMPRESS} format
14#
15# 3. It is easy to add a new compression type by editing
16# local.conf, such as:
17# DOC_COMPRESS_LIST:append = ' abc'
18# DOC_COMPRESS = 'abc'
19# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
20# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
21
22# All supported compression policy
23DOC_COMPRESS_LIST ?= "gz xz bz2"
24
25# Compression policy, must be one of ${DOC_COMPRESS_LIST}
26DOC_COMPRESS ?= "gz"
27
28# Compression shell command
29DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
30DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
31DOC_COMPRESS_CMD[xz] ?= "xz -v"
32
33# Decompression shell command
34DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
35DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
36DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
37
38PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
39python package_do_compress_doc() {
40 compress_mode = d.getVar('DOC_COMPRESS')
41 compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split()
42 if compress_mode not in compress_list:
43 bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
44
45 dvar = d.getVar('PKGD')
46 compress_cmds = {}
47 decompress_cmds = {}
48 for mode in compress_list:
49 compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
50 decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
51
52 mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir"))
53 if os.path.exists(mandir):
54        # Decompress doc files whose format is not compress_mode
55 decompress_doc(mandir, compress_mode, decompress_cmds)
56 compress_doc(mandir, compress_mode, compress_cmds)
57
58 infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir"))
59 if os.path.exists(infodir):
60        # Decompress doc files whose format is not compress_mode
61 decompress_doc(infodir, compress_mode, decompress_cmds)
62 compress_doc(infodir, compress_mode, compress_cmds)
63}
64
65def _get_compress_format(file, compress_format_list):
66 for compress_format in compress_format_list:
67 compress_suffix = '.' + compress_format
68 if file.endswith(compress_suffix):
69 return compress_format
70
71 return ''
72
73# Collect hardlinks to dict, each element in dict lists hardlinks
74# which point to the same doc file.
75# {hardlink10: [hardlink11, hardlink12],,,}
76# The hardlink10, hardlink11 and hardlink12 are the same file.
77def _collect_hardlink(hardlink_dict, file):
78 for hardlink in hardlink_dict:
79        # Add to the existing hardlink group
80 if os.path.samefile(hardlink, file):
81 hardlink_dict[hardlink].append(file)
82 return hardlink_dict
83
84 hardlink_dict[file] = []
85 return hardlink_dict
86
87def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
88 import subprocess
89 for target in hardlink_dict:
90 if decompress:
91 compress_format = _get_compress_format(target, shell_cmds.keys())
92 cmd = "%s -f %s" % (shell_cmds[compress_format], target)
93 bb.note('decompress hardlink %s' % target)
94 else:
95 cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
96 bb.note('compress hardlink %s' % target)
97 (retval, output) = subprocess.getstatusoutput(cmd)
98 if retval:
99 bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
100 return
101
102 for hardlink_dup in hardlink_dict[target]:
103 if decompress:
104 # Remove compress suffix
105 compress_suffix = '.' + compress_format
106 new_hardlink = hardlink_dup[:-len(compress_suffix)]
107 new_target = target[:-len(compress_suffix)]
108 else:
109 # Append compress suffix
110 compress_suffix = '.' + compress_mode
111 new_hardlink = hardlink_dup + compress_suffix
112 new_target = target + compress_suffix
113
114 bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
115 if not os.path.exists(new_hardlink):
116 os.link(new_target, new_hardlink)
117 if os.path.exists(hardlink_dup):
118 os.unlink(hardlink_dup)
119
120def _process_symlink(file, compress_format, decompress=False):
121 compress_suffix = '.' + compress_format
122 if decompress:
123 # Remove compress suffix
124 new_linkname = file[:-len(compress_suffix)]
125 new_source = os.readlink(file)[:-len(compress_suffix)]
126 else:
127 # Append compress suffix
128 new_linkname = file + compress_suffix
129 new_source = os.readlink(file) + compress_suffix
130
131 bb.note('symlink %s-->%s' % (new_linkname, new_source))
132 if not os.path.exists(new_linkname):
133 os.symlink(new_source, new_linkname)
134
135 os.unlink(file)
136
137def _is_info(file):
138 flags = '.info .info-'.split()
139 for flag in flags:
140 if flag in os.path.basename(file):
141 return True
142
143 return False
144
145def _is_man(file):
146 import re
147
148    # This refers to the MANSECT variable in man(1.6g)'s man.config:
149    # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
150    # Must not start with '.', and must contain one of the above colon-separated elements
151 p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
152 if p.search(file):
153 return True
154
155 return False
156
157def _is_compress_doc(file, compress_format_list):
158 compress_format = _get_compress_format(file, compress_format_list)
159 compress_suffix = '.' + compress_format
160 if file.endswith(compress_suffix):
161 # Remove the compress suffix
162 uncompress_file = file[:-len(compress_suffix)]
163 if _is_info(uncompress_file) or _is_man(uncompress_file):
164 return True, compress_format
165
166 return False, ''
167
168def compress_doc(topdir, compress_mode, compress_cmds):
169 import subprocess
170 hardlink_dict = {}
171 for root, dirs, files in os.walk(topdir):
172 for f in files:
173 file = os.path.join(root, f)
174 if os.path.isdir(file):
175 continue
176
177 if _is_info(file) or _is_man(file):
178 # Symlink
179 if os.path.islink(file):
180 _process_symlink(file, compress_mode)
181 # Hardlink
182 elif os.lstat(file).st_nlink > 1:
183 _collect_hardlink(hardlink_dict, file)
184 # Normal file
185 elif os.path.isfile(file):
186 cmd = "%s %s" % (compress_cmds[compress_mode], file)
187 (retval, output) = subprocess.getstatusoutput(cmd)
188 if retval:
189 bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
190 continue
191 bb.note('compress file %s' % file)
192
193 _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
194
195# Decompress doc files whose format is not compress_mode
196def decompress_doc(topdir, compress_mode, decompress_cmds):
197 import subprocess
198 hardlink_dict = {}
199 decompress = True
200 for root, dirs, files in os.walk(topdir):
201 for f in files:
202 file = os.path.join(root, f)
203 if os.path.isdir(file):
204 continue
205
206 res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
207            # Decompress files whose format is not compress_mode
208 if res and compress_mode!=compress_format:
209 # Symlink
210 if os.path.islink(file):
211 _process_symlink(file, compress_format, decompress)
212 # Hardlink
213 elif os.lstat(file).st_nlink > 1:
214 _collect_hardlink(hardlink_dict, file)
215 # Normal file
216 elif os.path.isfile(file):
217 cmd = "%s %s" % (decompress_cmds[compress_format], file)
218 (retval, output) = subprocess.getstatusoutput(cmd)
219 if retval:
220 bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
221 continue
222 bb.note('decompress file %s' % file)
223
224 _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
225
226python compress_doc_updatealternatives () {
227 if not bb.data.inherits_class('update-alternatives', d):
228 return
229
230 mandir = d.getVar("mandir")
231 infodir = d.getVar("infodir")
232 compress_mode = d.getVar('DOC_COMPRESS')
233 for pkg in (d.getVar('PACKAGES') or "").split():
234 old_names = (d.getVar('ALTERNATIVE:%s' % pkg) or "").split()
235 new_names = []
236 for old_name in old_names:
237 old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
238 old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \
239 d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \
240 d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
241 d.getVar('ALTERNATIVE_TARGET') or \
242 old_link
243 # Sometimes old_target is specified as relative to the link name.
244 old_target = os.path.join(os.path.dirname(old_link), old_target)
245
246            # Handle the update-alternatives entries used for compressed docs
247 if mandir in old_target or infodir in old_target:
248 new_name = old_name + '.' + compress_mode
249 new_link = old_link + '.' + compress_mode
250 new_target = old_target + '.' + compress_mode
251 d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
252 d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
253 if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name):
254 d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
255 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
256 elif d.getVarFlag('ALTERNATIVE_TARGET', old_name):
257 d.delVarFlag('ALTERNATIVE_TARGET', old_name)
258 d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
259 elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg):
260 d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
261 elif d.getVar('ALTERNATIVE_TARGET'):
262 d.setVar('ALTERNATIVE_TARGET', new_target)
263
264 new_names.append(new_name)
265
266 if new_names:
267 d.setVar('ALTERNATIVE:%s' % pkg, ' '.join(new_names))
268}
269
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
deleted file mode 100644
index 7ef7d07390..0000000000
--- a/meta/classes/core-image.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
1# Common code for generating core reference images
2#
3# Copyright (C) 2007-2011 Linux Foundation
4#
5# SPDX-License-Identifier: MIT
6
7# IMAGE_FEATURES control content of the core reference images
8#
9# By default we install packagegroup-core-boot and packagegroup-base-extended packages;
10# this gives us a working (console-only) rootfs.
11#
12# Available IMAGE_FEATURES:
13#
14# - weston - Weston Wayland compositor
15# - x11 - X server
16# - x11-base - X server with minimal environment
17# - x11-sato - OpenedHand Sato environment
18# - tools-debug - debugging tools
19# - eclipse-debug - Eclipse remote debugging support
20# - tools-profile - profiling tools
21# - tools-testapps - tools usable to make some device tests
22# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
23# - nfs-server - NFS server
24# - nfs-client - NFS client
25# - ssh-server-dropbear - SSH server (dropbear)
26# - ssh-server-openssh - SSH server (openssh)
27# - hwcodecs - Install hardware acceleration codecs
28# - package-management - installs package management tools and preserves the package manager database
29# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
30# - empty-root-password
31# - allow-empty-password
32# - allow-root-login
33# - post-install-logging
34# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
35# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
36# - lic-pkgs - license packages for all installed packages in the rootfs, requires
37# LICENSE_CREATE_PACKAGE="1" to be set when building packages too
38# - doc-pkgs - documentation packages for all installed packages in the rootfs
39# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
40# - ptest-pkgs - ptest packages for all ptest-enabled recipes
41# - read-only-rootfs - tweaks an image to support read-only rootfs
42# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
43# - splash - bootup splash screen
44#
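# For illustration, an image recipe might select features like:
#   IMAGE_FEATURES += "ssh-server-dropbear tools-debug"
#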
45FEATURE_PACKAGES_weston = "packagegroup-core-weston"
46FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
47FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
48FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
49FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
50FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
51FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
52FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
53FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
54FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
55FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client"
56FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
57FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
58FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
59
60
61# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
62# Including image feature foo would replace the image features bar1 and bar2
63IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
64
65# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
66# An error would be raised if both image features foo and bar1 (or bar2) are included
67
68MACHINE_HWCODECS ??= ""
69
70CORE_IMAGE_BASE_INSTALL = '\
71 packagegroup-core-boot \
72 packagegroup-base-extended \
73 \
74 ${CORE_IMAGE_EXTRA_INSTALL} \
75 '
76
77CORE_IMAGE_EXTRA_INSTALL ?= ""
78
79IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
80
81inherit image
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
deleted file mode 100644
index 1db0a4ded6..0000000000
--- a/meta/classes/cpan-base.bbclass
+++ /dev/null
@@ -1,33 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# cpan-base provides various perl-related information needed for building
9# cpan modules
10#
11FILES:${PN} += "${libdir}/perl5 ${datadir}/perl5"
12
13DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
14RDEPENDS:${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
15
16inherit perl-version
17
18def is_target(d):
19 if not bb.data.inherits_class('native', d):
20 return "yes"
21 return "no"
22
23PERLLIBDIRS = "${libdir}/perl5"
24PERLLIBDIRS:class-native = "${libdir}/perl5"
25
26def cpan_upstream_check_pattern(d):
27 for x in (d.getVar('SRC_URI') or '').split(' '):
28 if x.startswith("https://cpan.metacpan.org"):
29            _pattern = x.split('/')[-1].replace(d.getVar('PV'), r'(?P<pver>\d+\.\d+)')
30 return _pattern
31 return ''
32
33UPSTREAM_CHECK_REGEX ?= "${@cpan_upstream_check_pattern(d)}"
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
deleted file mode 100644
index bb76a5b326..0000000000
--- a/meta/classes/cpan.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This is for perl modules that use the old Makefile.PL build system
9#
10inherit cpan-base perlnative
11
12EXTRA_CPANFLAGS ?= ""
13EXTRA_PERLFLAGS ?= ""
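# e.g. a recipe could pass extra Makefile.PL arguments (illustrative):
#   EXTRA_CPANFLAGS = "EXPATLIBPATH=${STAGING_LIBDIR}"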
14
15# Env var which tells perl if it should use host (no) or target (yes) settings
16export PERLCONFIGTARGET = "${@is_target(d)}"
17
18# Env var which tells perl where the perl include files are
19export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE"
20export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}"
21export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
22export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
23export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
24
25cpan_do_configure () {
26 yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS}
27
28 # Makefile.PLs can exit with success without generating a
29 # Makefile, e.g. in cases of missing configure time
30 # dependencies. This is considered a best practice by
31 # cpantesters.org. See:
32 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
33 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
34 [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
35
36 if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
37 . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh
38 # Use find since there can be a Makefile generated for each Makefile.PL
39 for f in `find -name Makefile.PL`; do
40 f2=`echo $f | sed -e 's/.PL//'`
41 test -f $f2 || continue
42 sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
43 -e 's/perl.real/perl/' \
44 -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
45 $f2
46 done
47 fi
48}
49
50do_configure:append:class-target() {
51 find . -name Makefile | xargs sed -E -i \
52 -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
53}
54
55do_configure:append:class-nativesdk() {
56 find . -name Makefile | xargs sed -E -i \
57 -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
58}
59
60cpan_do_compile () {
61 oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
62}
63
64cpan_do_install () {
65 oe_runmake DESTDIR="${D}" install_vendor
66 for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
67 sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
68 done
69}
70
71EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
deleted file mode 100644
index 026859b6c7..0000000000
--- a/meta/classes/cpan_build.bbclass
+++ /dev/null
@@ -1,47 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This is for perl modules that use the new Build.PL build system
9#
10inherit cpan-base perlnative
11
12EXTRA_CPAN_BUILD_FLAGS ?= ""
13
14# Env var which tells perl if it should use host (no) or target (yes) settings
15export PERLCONFIGTARGET = "${@is_target(d)}"
16export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
17export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
18export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
19export LD = "${CCLD}"
20
21cpan_build_do_configure () {
22 if [ "${@is_target(d)}" = "yes" ]; then
23 # build for target
24 . ${STAGING_LIBDIR}/perl5/config.sh
25 fi
26
27 perl Build.PL --installdirs vendor --destdir ${D} \
28 ${EXTRA_CPAN_BUILD_FLAGS}
29
30 # Build.PLs can exit with success without generating a
31 # Build, e.g. in cases of missing configure time
32 # dependencies. This is considered a best practice by
33 # cpantesters.org. See:
34 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
35 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
36 [ -e Build ] || bbfatal "No Build was generated by Build.PL"
37}
38
39cpan_build_do_compile () {
40 perl Build --perl "${bindir}/perl" verbose=1
41}
42
43cpan_build_do_install () {
44 perl Build install --destdir ${D}
45}
46
47EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
deleted file mode 100644
index 1670217d69..0000000000
--- a/meta/classes/cross-canadian.bbclass
+++ /dev/null
@@ -1,200 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6# NOTE - When using this class the user is responsible for ensuring that
7# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
8# is changed, another nativesdk xxx-canadian-cross can be installed
9#
10
11
12# SDK packages are built either explicitly by the user,
13# or indirectly via dependency. No need to be in 'world'.
14EXCLUDE_FROM_WORLD = "1"
15NATIVESDKLIBC ?= "libc-glibc"
16LIBCOVERRIDE = ":${NATIVESDKLIBC}"
17CLASSOVERRIDE = "class-cross-canadian"
18STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
19
20#
21# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
22#
23PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
24BASECANADIANEXTRAOS ?= "linux-musl"
25CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
26CANADIANEXTRAVENDOR = ""
27MODIFYTOS ??= "1"
28python () {
29 archs = d.getVar('PACKAGE_ARCHS').split()
30 sdkarchs = []
31 for arch in archs:
32 sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
33 d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
34
35 # Allow the following code segment to be disabled, e.g. meta-environment
36 if d.getVar("MODIFYTOS") != "1":
37 return
38
39 if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]:
40 return
41
42 tos = d.getVar("TARGET_OS")
43 tos_known = ["mingw32"]
44 extralibcs = [""]
45 if "musl" in d.getVar("BASECANADIANEXTRAOS"):
46 extralibcs.append("musl")
47 if "android" in tos:
48 extralibcs.append("android")
49 for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
50 for libc in extralibcs:
51 entry = "linux"
52 if variant and libc:
53 entry = entry + "-" + libc + variant
54 elif variant:
55 entry = entry + "-gnu" + variant
56 elif libc:
57 entry = entry + "-" + libc
58 tos_known.append(entry)
59 if tos not in tos_known:
60        bb.fatal("Building cross-canadian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
61
62 for n in ["PROVIDES", "DEPENDS"]:
63 d.setVar(n, d.getVar(n))
64 d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN"))
65 for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
66 n = prefix + "_FOR_TARGET"
67 d.setVar(n, d.getVar(n))
68    # This is a bit ugly. We need to zero the LIBC/ABI extension, which will change TARGET_OS;
69    # however, we need the old value in some variables, so we expand those first.
70 tarch = d.getVar("TARGET_ARCH")
71 if tarch == "x86_64":
72 d.setVar("LIBCEXTENSION", "")
73 d.setVar("ABIEXTENSION", "")
74 d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
75 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
76 d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
77 elif tarch == "powerpc":
78 # PowerPC can build "linux" and "linux-gnuspe"
79 d.setVar("LIBCEXTENSION", "")
80 d.setVar("ABIEXTENSION", "")
81 d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
82 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
83 d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
84 elif tarch == "mips64":
85 d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
86 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
87 d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
88 if tarch == "arm" or tarch == "armeb":
89 d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi")
90 d.setVar("TARGET_OS", "linux-gnueabi")
91 else:
92 d.setVar("TARGET_OS", "linux")
93
94 # Also need to handle multilib target vendors
95 vendors = d.getVar("CANADIANEXTRAVENDOR")
96 if not vendors:
97 vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
98 origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL")
99 if origvendor:
100 d.setVar("TARGET_VENDOR", origvendor)
101 if origvendor not in vendors.split():
102 vendors = origvendor + " " + vendors
103 d.setVar("CANADIANEXTRAVENDOR", vendors)
104}
105MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
106
107INHIBIT_DEFAULT_DEPS = "1"
108
109STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
110
111TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
112
113PATH:append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
114PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
115
116HOST_ARCH = "${SDK_ARCH}"
117HOST_VENDOR = "${SDK_VENDOR}"
118HOST_OS = "${SDK_OS}"
119HOST_PREFIX = "${SDK_PREFIX}"
120HOST_CC_ARCH = "${SDK_CC_ARCH}"
121HOST_LD_ARCH = "${SDK_LD_ARCH}"
122HOST_AS_ARCH = "${SDK_AS_ARCH}"
123
124# Assign DPKG_ARCH
125DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
126
127CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
128CFLAGS = "${BUILDSDK_CFLAGS}"
129CXXFLAGS = "${BUILDSDK_CFLAGS}"
130LDFLAGS = "${BUILDSDK_LDFLAGS} \
131 -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
132 -Wl,-rpath,${libdir}/.. "
133
134#
135# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
136# binaries
137#
138DEPENDS:append = " chrpath-replacement-native"
139EXTRANATIVEPATH += "chrpath-native"
140
141# Path mangling needed by the cross packaging
142# Note that we use := here to ensure that libdir and includedir are
143# target paths.
144target_base_prefix := "${base_prefix}"
145target_prefix := "${prefix}"
146target_exec_prefix := "${exec_prefix}"
147target_base_libdir = "${target_base_prefix}/${baselib}"
148target_libdir = "${target_exec_prefix}/${baselib}"
149target_includedir := "${includedir}"
150
151# Change to place files in SDKPATH
152base_prefix = "${SDKPATHNATIVE}"
153prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
154exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
155bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
156sbindir = "${bindir}"
157base_bindir = "${bindir}"
158base_sbindir = "${bindir}"
159libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
160libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
161
162FILES:${PN} = "${prefix}"
163
164export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${exec_prefix}/lib/pkgconfig"
165export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
166
167do_populate_sysroot[stamp-extra-info] = ""
168do_packagedata[stamp-extra-info] = ""
169
170USE_NLS = "${SDKUSE_NLS}"
171
172# We have to use TARGET_ARCH, but we care about the absolute value
173# and not any particular tune that is enabled.
174TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
175
176PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
177# If MLPREFIX is set by multilib code, shlibs
178# points to the wrong place so force it
179SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
180SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
181
182cross_canadian_bindirlinks () {
183 for i in linux ${CANADIANEXTRAOS}
184 do
185 for v in ${CANADIANEXTRAVENDOR}
186 do
187 d=${D}${bindir}/../${TARGET_ARCH}$v-$i
188 if [ -d $d ];
189 then
190 continue
191 fi
192 install -d $d
193 for j in `ls ${D}${bindir}`
194 do
195 p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,`
196 ln -s ../${TARGET_SYS}/$j $d/$p
197 done
198 done
199 done
200}
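
As a sketch of the NOTE at the top of this class, SDK toolchain recipes conventionally encode the translated target architecture into PN before inheriting (recipe name hypothetical):

    PN = "example-cross-canadian-${TRANSLATED_TARGET_ARCH}"

    inherit cross-canadian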
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
deleted file mode 100644
index 93de9a5274..0000000000
--- a/meta/classes/cross.bbclass
+++ /dev/null
@@ -1,103 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit relocatable
8
9# Cross packages are built indirectly via dependency,
10# no need for them to be a direct target of 'world'
11EXCLUDE_FROM_WORLD = "1"
12
13CLASSOVERRIDE = "class-cross"
14PACKAGES = ""
15PACKAGES_DYNAMIC = ""
16PACKAGES_DYNAMIC:class-native = ""
17
18HOST_ARCH = "${BUILD_ARCH}"
19HOST_VENDOR = "${BUILD_VENDOR}"
20HOST_OS = "${BUILD_OS}"
21HOST_PREFIX = "${BUILD_PREFIX}"
22HOST_CC_ARCH = "${BUILD_CC_ARCH}"
23HOST_LD_ARCH = "${BUILD_LD_ARCH}"
24HOST_AS_ARCH = "${BUILD_AS_ARCH}"
25
26# No strip sysroot when DEBUG_BUILD is enabled
27INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
28
29export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
30
31STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
32
33PACKAGE_ARCH = "${BUILD_ARCH}"
34
35MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
36
37export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
38export PKG_CONFIG_SYSROOT_DIR = ""
39
40TARGET_CPPFLAGS = ""
41TARGET_CFLAGS = ""
42TARGET_CXXFLAGS = ""
43TARGET_LDFLAGS = ""
44
45CPPFLAGS = "${BUILD_CPPFLAGS}"
46CFLAGS = "${BUILD_CFLAGS}"
47CXXFLAGS = "${BUILD_CFLAGS}"
48LDFLAGS = "${BUILD_LDFLAGS}"
49
50TOOLCHAIN_OPTIONS = ""
51
52# This class encodes staging paths into its scripts data so it can only be
53# reused if we manipulate the paths.
54SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
55
56# Path mangling needed by the cross packaging
57# Note that we use := here to ensure that libdir and includedir are
58# target paths.
59target_base_prefix := "${root_prefix}"
60target_prefix := "${prefix}"
61target_exec_prefix := "${exec_prefix}"
62target_base_libdir = "${target_base_prefix}/${baselib}"
63target_libdir = "${target_exec_prefix}/${baselib}"
64target_includedir := "${includedir}"
65
66# Overrides for paths
67CROSS_TARGET_SYS_DIR = "${TARGET_SYS}"
68prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
69base_prefix = "${STAGING_DIR_NATIVE}"
70exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
71bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
72sbindir = "${bindir}"
73base_bindir = "${bindir}"
74base_sbindir = "${bindir}"
75libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
76libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
77
78do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
79do_packagedata[stamp-extra-info] = ""
80
81USE_NLS = "no"
82
83export CC = "${BUILD_CC}"
84export CXX = "${BUILD_CXX}"
85export FC = "${BUILD_FC}"
86export CPP = "${BUILD_CPP}"
87export LD = "${BUILD_LD}"
88export CCLD = "${BUILD_CCLD}"
89export AR = "${BUILD_AR}"
90export AS = "${BUILD_AS}"
91export RANLIB = "${BUILD_RANLIB}"
92export STRIP = "${BUILD_STRIP}"
93export NM = "${BUILD_NM}"
94
95inherit nopackages
96
97python do_addto_recipe_sysroot () {
98 bb.build.exec_func("extend_recipe_sysroot", d)
99}
100addtask addto_recipe_sysroot after do_populate_sysroot
101do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
102
103PATH:prepend = "${COREBASE}/scripts/cross-intercept:"
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
deleted file mode 100644
index 824b1bcff4..0000000000
--- a/meta/classes/crosssdk.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit cross
8
9CLASSOVERRIDE = "class-crosssdk"
10NATIVESDKLIBC ?= "libc-glibc"
11LIBCOVERRIDE = ":${NATIVESDKLIBC}"
12MACHINEOVERRIDES = ""
13PACKAGE_ARCH = "${SDK_ARCH}"
14
15python () {
16 # set TUNE_PKGARCH to SDK_ARCH
17 d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
18 # Set features here to prevent appends and distro features backfill
19 # from modifying nativesdk distro features
20 features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
21 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
22 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
23}
24
25STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
26
27# This class encodes staging paths into its scripts data so it can only be
28# reused if we manipulate the paths.
29SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
30
31TARGET_ARCH = "${SDK_ARCH}"
32TARGET_VENDOR = "${SDK_VENDOR}"
33TARGET_OS = "${SDK_OS}"
34TARGET_PREFIX = "${SDK_PREFIX}"
35TARGET_CC_ARCH = "${SDK_CC_ARCH}"
36TARGET_LD_ARCH = "${SDK_LD_ARCH}"
37TARGET_AS_ARCH = "${SDK_AS_ARCH}"
38TARGET_CPPFLAGS = ""
39TARGET_CFLAGS = ""
40TARGET_CXXFLAGS = ""
41TARGET_LDFLAGS = ""
42TARGET_FPU = ""
43
44
45target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
46target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
47target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
48target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
49target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
50baselib = "lib"
51
52do_packagedata[stamp-extra-info] = ""
53
54# Need to force this to ensure consistency across architectures
55EXTRA_OECONF_GCC_FLOAT = ""
56
57USE_NLS = "no"
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
deleted file mode 100644
index 7135d74837..0000000000
--- a/meta/classes/debian.bbclass
+++ /dev/null
@@ -1,156 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Debian package renaming only occurs when a package is built.
8# We therefore have to make sure we build all runtime packages
9# before building the current package, so that the package's
10# runtime dependencies are correct.
11#
12# Custom library package names can be defined by setting
13# DEBIANNAME:<pkgname> to the desired name.
14#
15# Better expressed as: ensure all RDEPENDS are packaged before we package this one.
16# This means we can't have circular RDEPENDS/RRECOMMENDS
17
18AUTO_LIBNAME_PKGS = "${PACKAGES}"
19
20inherit package
21
22DEBIANRDEP = "do_packagedata"
23do_package_write_ipk[deptask] = "${DEBIANRDEP}"
24do_package_write_deb[deptask] = "${DEBIANRDEP}"
25do_package_write_tar[deptask] = "${DEBIANRDEP}"
26do_package_write_rpm[deptask] = "${DEBIANRDEP}"
27do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
28do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
29do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
30do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
31
32python () {
33 if not d.getVar("PACKAGES"):
34 d.setVar("DEBIANRDEP", "")
35}
36
37python debian_package_name_hook () {
38 import glob, copy, stat, errno, re, pathlib, subprocess
39
40 pkgdest = d.getVar("PKGDEST")
41 packages = d.getVar('PACKAGES')
42 so_re = re.compile(r"lib.*\.so")
43
44 def socrunch(s):
45 s = s.lower().replace('_', '-')
46 m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
47 if m is None:
48 return None
49 if m.group(2) in '0123456789':
50 bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
51 else:
52 bin = m.group(1) + m.group(2) + m.group(3)
53 dev = m.group(1) + m.group(2)
54 return (bin, dev)
55
56 def isexec(path):
57 try:
58 s = os.stat(path)
59 except (os.error, AttributeError):
60 return 0
61 return (s[stat.ST_MODE] & stat.S_IEXEC)
62
63 def add_rprovides(pkg, d):
64 newpkg = d.getVar('PKG:' + pkg)
65 if newpkg and newpkg != pkg:
66 provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
67 if pkg not in provs:
68 d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
69
70 def auto_libname(packages, orig_pkg):
71 p = lambda var: pathlib.PurePath(d.getVar(var))
72 libdirs = (p("base_libdir"), p("libdir"))
73 bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))
74
75 sonames = []
76 has_bins = 0
77 has_libs = 0
78 for f in pkgfiles[orig_pkg]:
79 # This is .../packages-split/orig_pkg/
80 pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
81 # Strip pkgpath off the full path to a file in the package, re-root
82 # so it is absolute, and then get the parent directory of the file.
83 path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
84 if path in bindirs:
85 has_bins = 1
86 if path in libdirs:
87 has_libs = 1
88 if so_re.match(os.path.basename(f)):
89 try:
90 cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
91 output = subprocess.check_output(cmd).decode("utf-8")
92 for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
93 if m.group(1) not in sonames:
94 sonames.append(m.group(1))
95 except subprocess.CalledProcessError:
96 pass
97 bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
98 soname = None
99 if len(sonames) == 1:
100 soname = sonames[0]
101 elif len(sonames) > 1:
102 lead = d.getVar('LEAD_SONAME')
103 if lead:
104 r = re.compile(lead)
105 filtered = []
106 for s in sonames:
107 if r.match(s):
108 filtered.append(s)
109 if len(filtered) == 1:
110 soname = filtered[0]
111 elif len(filtered) > 1:
112 bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
113 else:
114 bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
115 else:
116 bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
117
118 if has_libs and not has_bins and soname:
119 soname_result = socrunch(soname)
120 if soname_result:
121 (pkgname, devname) = soname_result
122 for pkg in packages.split():
123 if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
124 add_rprovides(pkg, d)
125 continue
126 debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
127 if debian_pn:
128 newpkg = debian_pn
129 elif pkg == orig_pkg:
130 newpkg = pkgname
131 else:
132 newpkg = pkg.replace(orig_pkg, devname, 1)
133 mlpre=d.getVar('MLPREFIX')
134 if mlpre:
135 if not newpkg.find(mlpre) == 0:
136 newpkg = mlpre + newpkg
137 if newpkg != pkg:
138 bb.note("debian: renaming %s to %s" % (pkg, newpkg))
139 d.setVar('PKG:' + pkg, newpkg)
140 add_rprovides(pkg, d)
141 else:
142 add_rprovides(orig_pkg, d)
143
144 # reversed sort is needed when some package is substring of another
145 # ie in ncurses we get without reverse sort:
146 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
147 # and later
148 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
149 # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
150 for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
151 auto_libname(packages, pkg)
152}
153
154EXPORT_FUNCTIONS package_name_hook
155
156DEBIAN_NAMES = "1"
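
A short sketch of the two per-package controls honoured by the hook above (package and library names hypothetical):

    # Force a specific name instead of the automatically derived one
    DEBIANNAME:${PN} = "libfoo2"
    # Or opt a package out of the automatic renaming entirely
    DEBIAN_NOAUTONAME:${PN} = "1"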
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
deleted file mode 100644
index f56fe98d6d..0000000000
--- a/meta/classes/deploy.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
8SSTATETASKS += "do_deploy"
9do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
10do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
11
12python do_deploy_setscene () {
13 sstate_setscene(d)
14}
15addtask do_deploy_setscene
16do_deploy[dirs] = "${B}"
17do_deploy[cleandirs] = "${DEPLOYDIR}"
18do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
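
For illustration, a consuming recipe provides its own do_deploy that stages files into ${DEPLOYDIR}, roughly as in this sketch (artifact name hypothetical):

    inherit deploy

    do_deploy() {
        install -Dm 0644 ${B}/firmware.bin ${DEPLOYDIR}/firmware-${MACHINE}.bin
    }
    addtask deploy after do_compile before do_build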
diff --git a/meta/classes/devicetree.bbclass b/meta/classes/devicetree.bbclass
deleted file mode 100644
index ac1d284ccd..0000000000
--- a/meta/classes/devicetree.bbclass
+++ /dev/null
@@ -1,154 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This bbclass implements device tree compilation for user-provided device tree
8# sources. The compilation of the device tree sources is the same as the kernel
9# device tree compilation process, this includes being able to include sources
10# from the kernel such as soc dtsi files or header files such as gpio.h. In
11# addition to device trees this bbclass also handles compilation of device tree
12# overlays.
13#
14# The output of this class behaves similarly to how kernel-devicetree.bbclass
15# operates, in that the output files are installed into /boot/devicetree.
16# However, this class deliberately separates the deployed device trees into the
17# 'devicetree' subdirectory. This prevents clashes with the kernel-devicetree
18# output. Additionally, the device trees are populated into the sysroot so that
19# other recipes can access them via the sysroot.
20
21SECTION ?= "bsp"
22
23# The default inclusion of kernel device tree includes and headers means that
24# device trees built with them are at least GPL-2.0-only (and in some cases dual
25# licensed). Default to GPL-2.0-only if the recipe does not specify a license.
26LICENSE ?= "GPL-2.0-only"
27LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
28
29INHIBIT_DEFAULT_DEPS = "1"
30DEPENDS += "dtc-native"
31
32inherit deploy kernel-arch
33
34COMPATIBLE_MACHINE ?= "^$"
35
36PROVIDES = "virtual/dtb"
37
38PACKAGE_ARCH = "${MACHINE_ARCH}"
39
40SYSROOT_DIRS += "/boot/devicetree"
41FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
42
43S = "${WORKDIR}"
44B = "${WORKDIR}/build"
45
46# Default kernel includes, these represent what are normally used for in-kernel
47# sources.
48KERNEL_INCLUDE ??= " \
49 ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \
50 ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \
51 ${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \
52 "
53
54DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
55DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
56DT_FILES_PATH[doc] = "Defaults to source directory, can be used to select dts files that are not in source (e.g. generated)."
57DT_FILES_PATH ?= "${S}"
58
59DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, used as extra space typically for additional properties during boot."
60DT_PADDING_SIZE ??= "0x3000"
61DT_RESERVED_MAP[doc] = "Number of reserved map entries."
62DT_RESERVED_MAP ??= "8"
63DT_BOOT_CPU[doc] = "The boot cpu, defaults to 0"
64DT_BOOT_CPU ??= "0"
65
66DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
67DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
68DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@"
69DTC_OFLAGS ?= "-p 0 -@ -H epapr"
70
71python () {
72 if d.getVar("KERNEL_INCLUDE"):
73 # auto add dependency on kernel tree, but only if kernel include paths
74 # are specified.
75 d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure")
76}
77
78def expand_includes(varname, d):
79 import glob
80 includes = set()
81 # expand all includes with glob
82 for i in (d.getVar(varname) or "").split():
83 for g in glob.glob(i):
84 if os.path.isdir(g): # only add directories to include path
85 includes.add(g)
86 return includes
87
88def devicetree_source_is_overlay(path):
89 # determine if a dts file is an overlay by checking if it uses "/plugin/;"
90 with open(path, "r") as f:
91 for i in f:
92 if i.startswith("/plugin/;"):
93 return True
94 return False
95
96def devicetree_compile(dtspath, includes, d):
97 import subprocess
98 dts = os.path.basename(dtspath)
99 dtname = os.path.splitext(dts)[0]
100 bb.note("Processing {0} [{1}]".format(dtname, dts))
101
102 # preprocess
103 ppargs = d.getVar("BUILD_CPP").split()
104 ppargs += (d.getVar("DTC_PPFLAGS") or "").split()
105 for i in includes:
106 ppargs.append("-I{0}".format(i))
107 ppargs += ["-o", "{0}.pp".format(dts), dtspath]
108 bb.note("Running {0}".format(" ".join(ppargs)))
109 subprocess.run(ppargs, check = True)
110
111 # determine if the file is an overlay or not (using the preprocessed file)
112 isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))
113
114 # compile
115 dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split()
116 if isoverlay:
117 dtcargs += (d.getVar("DTC_OFLAGS") or "").split()
118 else:
119 dtcargs += (d.getVar("DTC_BFLAGS") or "").split()
120 for i in includes:
121 dtcargs += ["-i", i]
122 dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
123 dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
124 bb.note("Running {0}".format(" ".join(dtcargs)))
125 subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
126
127python devicetree_do_compile() {
128 includes = expand_includes("DT_INCLUDE", d)
129 listpath = d.getVar("DT_FILES_PATH")
130 for dts in os.listdir(listpath):
131 dtspath = os.path.join(listpath, dts)
132 try:
133 if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
134 continue # skip non-.dts files and non-overlay files
135 except:
136 continue # skip if can't determine if overlay
137 devicetree_compile(dtspath, includes, d)
138}
139
140devicetree_do_install() {
141 for DTB_FILE in `ls *.dtb *.dtbo`; do
142 install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE}
143 done
144}
145
146devicetree_do_deploy() {
147 for DTB_FILE in `ls *.dtb *.dtbo`; do
148 install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE}
149 done
150}
151addtask deploy before do_build after do_install
152
153EXPORT_FUNCTIONS do_compile do_install do_deploy
154
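
A minimal sketch of a recipe built on this class (machine and file names hypothetical); the .dts sources land in ${S} and are picked up by devicetree_do_compile:

    SUMMARY = "Out-of-tree device trees for mymachine"
    COMPATIBLE_MACHINE = "^mymachine$"

    SRC_URI = "file://my-board.dts file://my-overlay.dts"

    inherit devicetree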
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
deleted file mode 100644
index 03af56b7a2..0000000000
--- a/meta/classes/devshell.bbclass
+++ /dev/null
@@ -1,166 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit terminal
8
9DEVSHELL = "${SHELL}"
10
11PATH:prepend:task-devshell = "${COREBASE}/scripts/git-intercept:"
12
13python do_devshell () {
14 if d.getVarFlag("do_devshell", "manualfakeroot"):
15 d.prependVar("DEVSHELL", "pseudo ")
16 fakeenv = d.getVar("FAKEROOTENV").split()
17 for f in fakeenv:
18 k = f.split("=")
19 d.setVar(k[0], k[1])
20 d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
21 d.delVarFlag("do_devshell", "fakeroot")
22
23 oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
24}
25
26addtask devshell after do_patch do_prepare_recipe_sysroot
27
28# The directory that the terminal starts in
29DEVSHELL_STARTDIR ?= "${S}"
30do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
31do_devshell[nostamp] = "1"
32do_devshell[network] = "1"
33
34# devshell and fakeroot/pseudo need careful handling since only the final
35# command should run under fakeroot emulation; any X connection should
36# be made as the normal user. We therefore carefully construct the environment
37# manually
38python () {
39 if d.getVarFlag("do_devshell", "fakeroot"):
40 # We need to signal our code that we want fakeroot however we
41 # can't manipulate the environment and variables here yet (see YOCTO #4795)
42 d.setVarFlag("do_devshell", "manualfakeroot", "1")
43 d.delVarFlag("do_devshell", "fakeroot")
44}
45
46def pydevshell(d):
47
48 import code
49 import select
50 import signal
51 import termios
52
53 m, s = os.openpty()
54 sname = os.ttyname(s)
55
56 def noechoicanon(fd):
57 old = termios.tcgetattr(fd)
58 old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
59 # &~ termios.ISIG
60 termios.tcsetattr(fd, termios.TCSADRAIN, old)
61
62 # No echo or buffering over the pty
63 noechoicanon(s)
64
65 pid = os.fork()
66 if pid:
67 os.close(m)
68 oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
69 os._exit(0)
70 else:
71 os.close(s)
72
73 os.dup2(m, sys.stdin.fileno())
74 os.dup2(m, sys.stdout.fileno())
75 os.dup2(m, sys.stderr.fileno())
76
77 bb.utils.nonblockingfd(sys.stdout)
78 bb.utils.nonblockingfd(sys.stderr)
79 bb.utils.nonblockingfd(sys.stdin)
80
81 _context = {
82 "os": os,
83 "bb": bb,
84 "time": time,
85 "d": d,
86 }
87
88 ps1 = "pydevshell> "
89 ps2 = "... "
90 buf = []
91 more = False
92
93 i = code.InteractiveInterpreter(locals=_context)
94 print("OE PyShell (PN = %s)\n" % d.getVar("PN"))
95
96 def prompt(more):
97 if more:
98 prompt = ps2
99 else:
100 prompt = ps1
101 sys.stdout.write(prompt)
102 sys.stdout.flush()
103
104 # Restore Ctrl+C since bitbake masks this
105 def signal_handler(signal, frame):
106 raise KeyboardInterrupt
107 signal.signal(signal.SIGINT, signal_handler)
108
109 child = None
110
111 prompt(more)
112 while True:
113 try:
114 try:
115 (r, _, _) = select.select([sys.stdin], [], [], 1)
116 if not r:
117 continue
118 line = sys.stdin.readline().strip()
119 if not line:
120 prompt(more)
121 continue
122 except EOFError as e:
123 sys.stdout.write("\n")
124 sys.stdout.flush()
125 except (OSError, IOError) as e:
126 if e.errno == 11:
127 continue
128 if e.errno == 5:
129 return
130 raise
131 else:
132 if not child:
133 child = int(line)
134 continue
135 buf.append(line)
136 source = "\n".join(buf)
137 more = i.runsource(source, "<pyshell>")
138 if not more:
139 buf = []
140 sys.stderr.flush()
141 prompt(more)
142 except KeyboardInterrupt:
143 i.write("\nKeyboardInterrupt\n")
144 buf = []
145 more = False
146 prompt(more)
147 except SystemExit:
148 # Easiest way to ensure everything exits
149 os.kill(child, signal.SIGTERM)
150 break
151
152python do_pydevshell() {
153 import signal
154
155 try:
156 pydevshell(d)
157 except SystemExit:
158 # Stop the SIGTERM above causing an error exit code
159 return
160 finally:
161 return
162}
163addtask pydevshell after do_patch
164
165do_pydevshell[nostamp] = "1"
166do_pydevshell[network] = "1"
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
deleted file mode 100644
index 1529cc8fca..0000000000
--- a/meta/classes/devupstream.bbclass
+++ /dev/null
@@ -1,61 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class for use in BBCLASSEXTEND to make it easier to have a single recipe that
8# can build both stable tarballs and snapshots from upstream source
9# repositories.
10#
11# Usage:
12# BBCLASSEXTEND = "devupstream:target"
13# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
14# SRCREV:class-devupstream = "abcdef"
15#
16# If the first entry in SRC_URI is a git: URL then S is rewritten to
17# WORKDIR/git.
18#
19# There are a few caveats that remain to be solved:
20# - You can't build native or nativesdk recipes using for example
21# devupstream:native, you can only build target recipes.
22# - If the fetcher requires native tools (such as subversion-native) then
23# bitbake won't be able to add them automatically.
24
25python devupstream_virtclass_handler () {
26 # Do nothing if this is inherited, as it's for BBCLASSEXTEND
27 if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
28 bb.error("Don't inherit devupstream, use BBCLASSEXTEND")
29 return
30
31 variant = d.getVar("BBEXTENDVARIANT")
32 if variant not in ("target", "native"):
33 bb.error("Unsupported variant %s. Pass the variant when using devupstream, for example devupstream:target" % variant)
34 return
35
36    # Development releases are never preferred by default
37 d.setVar("DEFAULT_PREFERENCE", "-1")
38
39 src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI")
40 uri = bb.fetch2.URI(src_uri.split()[0])
41
42 if uri.scheme == "git" and not d.getVar("S:class-devupstream"):
43 d.setVar("S", "${WORKDIR}/git")
44
45 # Modify the PV if the recipe hasn't already overridden it
46 pv = d.getVar("PV")
47 proto_marker = "+" + uri.scheme
48 if proto_marker not in pv and not d.getVar("PV:class-devupstream"):
49 d.setVar("PV", pv + proto_marker + "${SRCPV}")
50
51 if variant == "native":
52 pn = d.getVar("PN")
53 d.setVar("PN", "%s-native" % (pn))
54 fn = d.getVar("FILE")
55 bb.parse.BBHandler.inherit("native", fn, 0, d)
56
57 d.appendVar("CLASSOVERRIDE", ":class-devupstream")
58}
59
60addhandler devupstream_virtclass_handler
61devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
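
Since the class sets DEFAULT_PREFERENCE to -1, the snapshot variant must be selected explicitly; a plausible sketch, e.g. in local.conf (recipe name and version hypothetical):

    PREFERRED_VERSION_example = "1.2+git%"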
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
deleted file mode 100644
index 1f2674fd6e..0000000000
--- a/meta/classes/distro_features_check.bbclass
+++ /dev/null
@@ -1,13 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Temporarily provide fallback to the old name of the class
8
9python __anonymous() {
10 bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
11}
12
13inherit features_check
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
deleted file mode 100644
index 8d9d7cda7d..0000000000
--- a/meta/classes/distrooverrides.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Turns certain DISTRO_FEATURES into overrides with the same
8# name plus a df- prefix. Ensures that these special
9# distro features remain set also for native and nativesdk
10# recipes, so that these overrides can also be used there.
11#
12# This makes it simpler to write .bbappends that only change the
13# task signatures of the recipe if the change is really enabled,
14# for example with:
15# do_install:append:df-my-feature () { ... }
16# where "my-feature" is a DISTRO_FEATURE.
17#
18# The class is meant to be used in a layer.conf or distro
19# .inc file with:
20# INHERIT += "distrooverrides"
21# DISTRO_FEATURES_OVERRIDES += "my-feature"
22#
23# Beware that this part of OVERRIDES changes during parsing, so usage
24# of these overrides should be limited to .bb and .bbappend files,
25# because then DISTRO_FEATURES is final.
26
27DISTRO_FEATURES_OVERRIDES ?= ""
28DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
29Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
30
31DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
32DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
33
34# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
35# signature because of this line, then the task dependency on
36# OVERRIDES itself should be fixed. Excluding these two variables
37# with DISTROOVERRIDES[vardepsexclude] would just work around the problem.
38DISTROOVERRIDES .= "${@ ''.join([':df-' + x for x in sorted(set(d.getVar('DISTRO_FEATURES_OVERRIDES').split()) & set((d.getVar('DISTRO_FEATURES') or '').split()))]) }"
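
Putting the usage notes above together, a sketch of a distro configuration plus a bbappend keyed off the resulting override (feature name hypothetical):

    # distro .inc or layer.conf
    INHERIT += "distrooverrides"
    DISTRO_FEATURES_OVERRIDES += "my-feature"

    # some recipe .bbappend
    do_install:append:df-my-feature () {
        install -d ${D}${sysconfdir}/my-feature
    }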
diff --git a/meta/classes/dos2unix.bbclass b/meta/classes/dos2unix.bbclass
deleted file mode 100644
index 18e89b1cf2..0000000000
--- a/meta/classes/dos2unix.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class to convert all CRLF line terminators to LF.
8# Some projects are developed/maintained on Windows and
9# therefore use CRLF line terminators rather than the LF
10# used on Linux, which can cause annoying patching errors
11# during git push/checkout processes.
12
13do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot"
14
15# Convert CRLF line terminators to LF
16do_convert_crlf_to_lf () {
17 find ${S} -type f -exec dos2unix {} \;
18}
19
20addtask convert_crlf_to_lf after do_unpack before do_patch
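
Usage is a single inherit in the affected recipe; the class then slots its conversion task between do_unpack and do_patch:

    inherit dos2unix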
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
deleted file mode 100644
index 51dbe9ea5a..0000000000
--- a/meta/classes/externalsrc.bbclass
+++ /dev/null
@@ -1,269 +0,0 @@
1# Copyright (C) 2012 Linux Foundation
2# Author: Richard Purdie
3# Some code and influence taken from srctree.bbclass:
4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
5#
6# SPDX-License-Identifier: MIT
7#
8# externalsrc.bbclass enables use of an existing source tree, usually external to
9# the build system, to build a piece of software rather than going through the usual
10# fetch/unpack/patch process.
11#
12# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
13# directory you want to use containing the sources e.g. from local.conf for a recipe
14# called "myrecipe" you would do:
15#
16# INHERIT += "externalsrc"
17# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
18#
19# In order to make this class work for both target and native versions (or with
20# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
21# directory under the work directory (split source and build directories). This is
22# the default, but the build directory can be set to the source directory if
23# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
24#
25# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
26#
27
28SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
29EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
30
31python () {
32 externalsrc = d.getVar('EXTERNALSRC')
33 externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
34
35 if externalsrc and not externalsrc.startswith("/"):
36 bb.error("EXTERNALSRC must be an absolute path")
37 if externalsrcbuild and not externalsrcbuild.startswith("/"):
38 bb.error("EXTERNALSRC_BUILD must be an absolute path")
39
40 # If this is the base recipe and EXTERNALSRC is set for it or any of its
41 # derivatives, then enable BB_DONT_CACHE to force the recipe to always be
42 # re-parsed so that the file-checksums function for do_compile is run every
43 # time.
44 bpn = d.getVar('BPN')
45 classextend = (d.getVar('BBCLASSEXTEND') or '').split()
46 if bpn == d.getVar('PN') or not classextend:
47 if (externalsrc or
48 ('native' in classextend and
49 d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
50 ('nativesdk' in classextend and
51 d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
52 ('cross' in classextend and
53 d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
54 d.setVar('BB_DONT_CACHE', '1')
55
56 if externalsrc:
57 import oe.recipeutils
58 import oe.path
59
60 d.setVar('S', externalsrc)
61 if externalsrcbuild:
62 d.setVar('B', externalsrcbuild)
63 else:
64 d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
65
66 local_srcuri = []
67 fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
68 for url in fetch.urls:
69 url_data = fetch.ud[url]
70 parm = url_data.parm
71 if (url_data.type == 'file' or
72 url_data.type == 'npmsw' or url_data.type == 'crate' or
73 'type' in parm and parm['type'] == 'kmeta'):
74 local_srcuri.append(url)
75
76 d.setVar('SRC_URI', ' '.join(local_srcuri))
77
78 # Dummy value because the default function can't be called with blank SRC_URI
79 d.setVar('SRCPV', '999')
80
81 if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
82 d.setVar('CONFIGUREOPT_DEPTRACK', '')
83
84 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
85
86 for task in tasks:
87 if task.endswith("_setscene"):
88 # sstate is never going to work for external source trees, disable it
89 bb.build.deltask(task, d)
90 elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
91            # Since configure will likely touch ${S}, take the lock so only one task has access at a time
92 d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
93
94 for funcname in [task, "base_" + task, "kernel_" + task]:
95 # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
96 cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(funcname, 'cleandirs', False) or '')
97 setvalue = False
98 for cleandir in cleandirs[:]:
99 if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
100 cleandirs.remove(cleandir)
101 setvalue = True
102 if setvalue:
103 d.setVarFlag(funcname, 'cleandirs', ' '.join(cleandirs))
104
105 fetch_tasks = ['do_fetch', 'do_unpack']
106 # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
107 # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
108 d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
109
110 for task in d.getVar("SRCTREECOVEREDTASKS").split():
111 if local_srcuri and task in fetch_tasks:
112 continue
113 bb.build.deltask(task, d)
114 if task == 'do_unpack':
115 # The reproducible build create_source_date_epoch_stamp function must
116 # be run after the source is available and before the
117 # do_deploy_source_date_epoch task. In the normal case, it's attached
118 # to do_unpack as a postfuncs, but since we removed do_unpack (above)
119 # we need to move the function elsewhere. The easiest thing to do is
120 # move it into the prefuncs of the do_deploy_source_date_epoch task.
121 # This is safe, as externalsrc runs with the source already unpacked.
122 d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
123
124 d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
125 d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
126
127 d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
128 d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
129
130 # We don't want the workdir to go away
131 d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
132
133 bb.build.addtask('do_buildclean',
134 'do_clean' if d.getVar('S') == d.getVar('B') else None,
135 None, d)
136
137 # If B=S the same builddir is used even for different architectures.
138 # Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
139 # change of do_configure task hash is correctly detected and stamps are
140 # invalidated if e.g. MACHINE changes.
141 if d.getVar('S') == d.getVar('B'):
142 configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
143 d.setVar('CONFIGURESTAMPFILE', configstamp)
144 d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
145 d.setVar('STAMPCLEAN', '${STAMPS_DIR}/work-shared/${PN}/*-*')
146}
147
148python externalsrc_configure_prefunc() {
149 s_dir = d.getVar('S')
150 # Create desired symlinks
151 symlinks = (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()
152 newlinks = []
153 for symlink in symlinks:
154 symsplit = symlink.split(':', 1)
155 lnkfile = os.path.join(s_dir, symsplit[0])
156 target = d.expand(symsplit[1])
157 if len(symsplit) > 1:
158 if os.path.islink(lnkfile):
159 # Link already exists, leave it if it points to the right location already
160 if os.readlink(lnkfile) == target:
161 continue
162 os.unlink(lnkfile)
163 elif os.path.exists(lnkfile):
164 # File/dir exists with same name as link, just leave it alone
165 continue
166 os.symlink(target, lnkfile)
167 newlinks.append(symsplit[0])
168 # Hide the symlinks from git
169 try:
170 git_exclude_file = os.path.join(s_dir, '.git/info/exclude')
171 if os.path.exists(git_exclude_file):
172 with open(git_exclude_file, 'r+') as efile:
173 elines = efile.readlines()
174 for link in newlinks:
175 if link in elines or '/'+link in elines:
176 continue
177 efile.write('/' + link + '\n')
178 except IOError as ioe:
179 bb.note('Failed to hide EXTERNALSRC_SYMLINKS from git')
180}
181
182python externalsrc_compile_prefunc() {
183 # Make it obvious that this is happening, since forgetting about it could lead to much confusion
184 bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN'), d.getVar('EXTERNALSRC')))
185}
186
187do_buildclean[dirs] = "${S} ${B}"
188do_buildclean[nostamp] = "1"
189do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
190externalsrc_do_buildclean() {
191 if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
192 rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
193 if [ "${CLEANBROKEN}" != "1" ]; then
194 oe_runmake clean || die "make failed"
195 fi
196 else
197 bbnote "nothing to do - no makefile found"
198 fi
199}
200
201def srctree_hash_files(d, srcdir=None):
202 import shutil
203 import subprocess
204 import tempfile
205 import hashlib
206
207 s_dir = srcdir or d.getVar('EXTERNALSRC')
208 git_dir = None
209
210 try:
211 git_dir = os.path.join(s_dir,
212 subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
213 top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
214 stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
215 if git_dir == top_git_dir:
216 git_dir = None
217 except subprocess.CalledProcessError:
218 pass
219
220 ret = " "
221 if git_dir is not None:
222 oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
223 with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
224 # Clone index
225 shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
226 # Update our custom index
227 env = os.environ.copy()
228 env['GIT_INDEX_FILE'] = tmp_index.name
229 subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
230 git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
231 submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
232 for line in submodule_helper.splitlines():
233 module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
234 if os.path.isdir(module_dir):
235 proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
236 proc.communicate()
237 proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
238 stdout, _ = proc.communicate()
239 git_sha1 += stdout.decode("utf-8")
240 sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
241 with open(oe_hash_file, 'w') as fobj:
242 fobj.write(sha1)
243 ret = oe_hash_file + ':True'
244 else:
245 ret = s_dir + '/*:True'
246 return ret
247
248def srctree_configure_hash_files(d):
249 """
250 Get the list of files that should trigger do_configure to re-execute,
251 based on the value of CONFIGURE_FILES
252 """
253 in_files = (d.getVar('CONFIGURE_FILES') or '').split()
254 out_items = []
255 search_files = []
256 for entry in in_files:
257 if entry.startswith('/'):
258 out_items.append('%s:%s' % (entry, os.path.exists(entry)))
259 else:
260 search_files.append(entry)
261 if search_files:
262 s_dir = d.getVar('EXTERNALSRC')
263 for root, _, files in os.walk(s_dir):
264 for f in files:
265 if f in search_files:
266 out_items.append('%s:True' % os.path.join(root, f))
267 return ' '.join(out_items)
268
269EXPORT_FUNCTIONS do_buildclean
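
As an alternative to the global INHERIT shown in the header, a sketch of enabling this from a single recipe or bbappend (paths hypothetical):

    inherit externalsrc
    EXTERNALSRC = "/path/to/my/source/tree"
    # Optionally build in the source tree rather than a separate build directory
    EXTERNALSRC_BUILD = "/path/to/my/source/tree"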
diff --git a/meta/classes/features_check.bbclass b/meta/classes/features_check.bbclass
deleted file mode 100644
index 163a7bc3fc..0000000000
--- a/meta/classes/features_check.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
1# Allow checking of required and conflicting features
2#
3# xxx = [DISTRO,MACHINE,COMBINED,IMAGE]
4#
5# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
6# in xxx_FEATURES.
7# REQUIRED_xxx_FEATURES: ensure every item on this list is included
8# in xxx_FEATURES.
9# CONFLICT_xxx_FEATURES: ensure no item in this list is included in
10# xxx_FEATURES.
11#
12# Copyright 2019 (C) Texas Instruments Inc.
13# Copyright 2013 (C) O.S. Systems Software LTDA.
14#
15# SPDX-License-Identifier: MIT
16
17
18python () {
19 if d.getVar('PARSE_ALL_RECIPES', False):
20 return
21
22 unused = True
23
24 for kind in ['DISTRO', 'MACHINE', 'COMBINED', 'IMAGE']:
25 if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and not d.hasOverrides('ANY_OF_' + kind + '_FEATURES') and \
26 d.getVar('REQUIRED_' + kind + '_FEATURES') is None and not d.hasOverrides('REQUIRED_' + kind + '_FEATURES') and \
27 d.getVar('CONFLICT_' + kind + '_FEATURES') is None and not d.hasOverrides('CONFLICT_' + kind + '_FEATURES'):
28 continue
29
30 unused = False
31
32 # Assume at least one var is set.
33 features = set((d.getVar(kind + '_FEATURES') or '').split())
34
35 any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split())
36 if any_of_features:
37 if set.isdisjoint(any_of_features, features):
38 raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES"
39 % (' '.join(any_of_features), kind))
40
41 required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split())
42 if required_features:
43 missing = set.difference(required_features, features)
44 if missing:
45 raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)"
46 % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind))
47
48 conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split())
49 if conflict_features:
50 conflicts = set.intersection(conflict_features, features)
51 if conflicts:
52 raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)"
53 % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind))
54
55 if unused:
56 bb.warn("Recipe inherits features_check but doesn't use it")
57}
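
A sketch of the three check variables in a recipe (feature names are examples only):

    inherit features_check

    REQUIRED_DISTRO_FEATURES = "x11 opengl"
    CONFLICT_DISTRO_FEATURES = "wayland"
    ANY_OF_MACHINE_FEATURES = "wifi bluetooth"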
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
deleted file mode 100644
index 0d496b72dd..0000000000
--- a/meta/classes/fontcache.bbclass
+++ /dev/null
@@ -1,63 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class will generate the proper postinst/postrm scriptlets for font
9# packages.
10#
11
12PACKAGE_WRITE_DEPS += "qemu-native"
13inherit qemu
14
15FONT_PACKAGES ??= "${PN}"
16FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
17FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
18FONTCONFIG_CACHE_PARAMS ?= "-v"
19# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues.
20# Something has to be set, because qemuwrapper uses this variable after -E.
21# Multiple variables aren't allowed, because for qemu they are separated
22# by commas, while in the -n "$D" case they should be separated by spaces.
23FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
24fontcache_common() {
25if [ -n "$D" ] ; then
26 $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
27 'bindir="${bindir}"' \
28 'libdir="${libdir}"' \
29 'libexecdir="${libexecdir}"' \
30 'base_libdir="${base_libdir}"' \
31 'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
32 'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
33 'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"'
34else
35 ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS}
36fi
37}
38
39python () {
40 font_pkgs = d.getVar('FONT_PACKAGES').split()
41 deps = d.getVar("FONT_EXTRA_RDEPENDS")
42
43 for pkg in font_pkgs:
44 if deps: d.appendVar('RDEPENDS:' + pkg, ' '+deps)
45}
46
47python add_fontcache_postinsts() {
48 for pkg in d.getVar('FONT_PACKAGES').split():
49 bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
50 postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
51 if not postinst:
52 postinst = '#!/bin/sh\n'
53 postinst += d.getVar('fontcache_common')
54 d.setVar('pkg_postinst:%s' % pkg, postinst)
55
56 postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
57 if not postrm:
58 postrm = '#!/bin/sh\n'
59 postrm += d.getVar('fontcache_common')
60 d.setVar('pkg_postrm:%s' % pkg, postrm)
61}
62
63PACKAGEFUNCS =+ "add_fontcache_postinsts"
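
A sketch of a font recipe splitting its fonts across extra packages (package names and paths hypothetical):

    inherit fontcache

    PACKAGES =+ "${PN}-serif ${PN}-sans"
    FONT_PACKAGES = "${PN}-serif ${PN}-sans"
    FILES:${PN}-serif = "${datadir}/fonts/truetype/serif"
    FILES:${PN}-sans = "${datadir}/fonts/truetype/sans"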
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
deleted file mode 100644
index a9e7eb8c67..0000000000
--- a/meta/classes/fs-uuid.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Extract UUID from ${ROOTFS}, which must have been built
8# by the time that this function gets called. Only works
9# on ext file systems and depends on tune2fs.
10def get_rootfs_uuid(d):
11 import subprocess
12 rootfs = d.getVar('ROOTFS')
13 output = subprocess.check_output(['tune2fs', '-l', rootfs])
14 for line in output.split('\n'):
15 if line.startswith('Filesystem UUID:'):
16 uuid = line.split()[-1]
17 bb.note('UUID of %s: %s' % (rootfs, uuid))
18 return uuid
19 bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
20
21# Replace the special <<uuid-of-rootfs>> inside a string (like the
22# root= APPEND string in a syslinux.cfg or systemd-boot entry) with the
23# actual UUID of the rootfs. Does nothing if the special string
24# is not used.
25def replace_rootfs_uuid(d, string):
26 UUID_PLACEHOLDER = '<<uuid-of-rootfs>>'
27 if UUID_PLACEHOLDER in string:
28 uuid = get_rootfs_uuid(d)
29 string = string.replace(UUID_PLACEHOLDER, uuid)
30 return string
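
For example, a boot configuration fragment that is passed through replace_rootfs_uuid() can reference the placeholder like this (a sketch; where APPEND is consumed depends on the image/bootloader class in use):

    APPEND += "root=UUID=<<uuid-of-rootfs>> rootwait"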
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
deleted file mode 100644
index b81851bc78..0000000000
--- a/meta/classes/gconf.bbclass
+++ /dev/null
@@ -1,77 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS += "gconf"
8PACKAGE_WRITE_DEPS += "gconf-native"
9
10# These are for when gconftool is used natively and the prefix isn't necessarily
11# the sysroot. TODO: replicate the postinst logic for -native packages going
12# into sysroot as they won't be running their own install-time schema
13# registration (disabled below) nor the postinst script (as they don't happen).
14export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
15export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
16
17# Disable install-time schema registration as we're a packaging system so this
18# happens in the postinst script, not at install time. Set both the configure
19# script option and the traditional environment variable just to make sure.
20EXTRA_OECONF += "--disable-schemas-install"
21export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
22
23gconf_postinst() {
24if [ "x$D" != "x" ]; then
25 export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
26else
27 export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
28fi
29
30SCHEMA_LOCATION=$D/etc/gconf/schemas
31for SCHEMA in ${SCHEMA_FILES}; do
32 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
33 HOME=$D/root gconftool-2 \
34 --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
35 fi
36done
37}
38
39gconf_prerm() {
40SCHEMA_LOCATION=/etc/gconf/schemas
41for SCHEMA in ${SCHEMA_FILES}; do
42 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
43 HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
44 gconftool-2 \
45 --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
46 fi
47done
48}
49
50python populate_packages:append () {
51 import re
52 packages = d.getVar('PACKAGES').split()
53 pkgdest = d.getVar('PKGDEST')
54
55 for pkg in packages:
56 schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
57 schemas = []
58 schema_re = re.compile(r".*\.schemas$")
59 if os.path.exists(schema_dir):
60 for f in os.listdir(schema_dir):
61 if schema_re.match(f):
62 schemas.append(f)
63 if schemas != []:
64 bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
65 d.setVar('SCHEMA_FILES', " ".join(schemas))
66 postinst = d.getVar('pkg_postinst:%s' % pkg)
67 if not postinst:
68 postinst = '#!/bin/sh\n'
69 postinst += d.getVar('gconf_postinst')
70 d.setVar('pkg_postinst:%s' % pkg, postinst)
71 prerm = d.getVar('pkg_prerm:%s' % pkg)
72 if not prerm:
73 prerm = '#!/bin/sh\n'
74 prerm += d.getVar('gconf_prerm')
75 d.setVar('pkg_prerm:%s' % pkg, prerm)
76 d.appendVar("RDEPENDS:%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
77}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
deleted file mode 100644
index c313885d52..0000000000
--- a/meta/classes/gettext.bbclass
+++ /dev/null
@@ -1,28 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def gettext_dependencies(d):
8 if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
9 return ""
10 if d.getVar('USE_NLS') == 'no':
11 return "gettext-minimal-native"
12 return "gettext-native"
13
14def gettext_oeconf(d):
15 if d.getVar('USE_NLS') == 'no':
16 return '--disable-nls'
17 # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
18 if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
19 return '--disable-nls'
20 return "--enable-nls"
21
22BASEDEPENDS:append = " ${@gettext_dependencies(d)}"
23EXTRA_OECONF:append = " ${@gettext_oeconf(d)}"
24
25# Without this, msgfmt from gettext-native will not find ITS files
26# provided by target recipes (for example, polkit.its).
27GETTEXTDATADIRS:append:class-target = ":${STAGING_DATADIR}/gettext"
28export GETTEXTDATADIRS
diff --git a/meta/classes/gi-docgen.bbclass b/meta/classes/gi-docgen.bbclass
deleted file mode 100644
index 8b7eaacea3..0000000000
--- a/meta/classes/gi-docgen.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# gi-docgen is a new gnome documentation generator, which
8# seems to be a successor to gtk-doc:
9# https://gitlab.gnome.org/GNOME/gi-docgen
10
11# This variable is set to True if api-documentation is in
12# DISTRO_FEATURES, and False otherwise.
13GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'True', 'False', d)}"
14# When building native recipes, disable gi-docgen, as it is not necessary,
15# pulls in additional dependencies, and makes build times longer
16GIDOCGEN_ENABLED:class-native = "False"
17GIDOCGEN_ENABLED:class-nativesdk = "False"
18
19# meson: default option name to enable/disable gi-docgen. This matches most
20# projects' configuration. If in doubt, check meson_options.txt in the
21# project's source tree.
22GIDOCGEN_MESON_OPTION ?= 'gtk_doc'
23GIDOCGEN_MESON_ENABLE_FLAG ?= 'true'
24GIDOCGEN_MESON_DISABLE_FLAG ?= 'false'
25
26# Auto enable/disable based on GIDOCGEN_ENABLED
27EXTRA_OEMESON:prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} "
28
29DEPENDS:append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}"
30
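
If a project names its meson documentation option differently from the default above, only the option name needs overriding in the recipe (option name hypothetical):

    GIDOCGEN_MESON_OPTION = "documentation"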
diff --git a/meta/classes/gio-module-cache.bbclass b/meta/classes/gio-module-cache.bbclass
deleted file mode 100644
index d12e03c4a0..0000000000
--- a/meta/classes/gio-module-cache.bbclass
+++ /dev/null
@@ -1,44 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PACKAGE_WRITE_DEPS += "qemu-native"
8inherit qemu
9
10GIO_MODULE_PACKAGES ??= "${PN}"
11
12gio_module_cache_common() {
13if [ "x$D" != "x" ]; then
14 $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \
15 mlprefix=${MLPREFIX} \
16 binprefix=${MLPREFIX} \
17 libdir=${libdir} \
18 libexecdir=${libexecdir} \
19 base_libdir=${base_libdir} \
20 bindir=${bindir}
21else
22 ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/
23fi
24}
25
26python populate_packages:append () {
27 packages = d.getVar('GIO_MODULE_PACKAGES').split()
28
29 for pkg in packages:
30 bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
31
32 postinst = d.getVar('pkg_postinst:%s' % pkg)
33 if not postinst:
34 postinst = '#!/bin/sh\n'
35 postinst += d.getVar('gio_module_cache_common')
36 d.setVar('pkg_postinst:%s' % pkg, postinst)
37
38 postrm = d.getVar('pkg_postrm:%s' % pkg)
39 if not postrm:
40 postrm = '#!/bin/sh\n'
41 postrm += d.getVar('gio_module_cache_common')
42 d.setVar('pkg_postrm:%s' % pkg, postrm)
43}
44
diff --git a/meta/classes/glide.bbclass b/meta/classes/glide.bbclass
deleted file mode 100644
index 21b48fa4e0..0000000000
--- a/meta/classes/glide.bbclass
+++ /dev/null
@@ -1,15 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Handle Glide Vendor Package Management use
8#
9# Copyright 2018 (C) O.S. Systems Software LTDA.
10
11DEPENDS:append = " glide-native"
12
13do_compile:prepend() {
14 ( cd ${B}/src/${GO_IMPORT} && glide install )
15}
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
deleted file mode 100644
index 805daafa40..0000000000
--- a/meta/classes/gnomebase.bbclass
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def gnome_verdir(v):
8 return ".".join(v.split(".")[:-1])
9
10
11GNOME_COMPRESS_TYPE ?= "xz"
12SECTION ?= "x11/gnome"
13GNOMEBN ?= "${BPN}"
14SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
15
16FILES:${PN} += "${datadir}/application-registry \
17 ${datadir}/mime-info \
18 ${datadir}/mime/packages \
19 ${datadir}/mime/application \
20 ${datadir}/gnome-2.0 \
21 ${datadir}/polkit* \
22 ${datadir}/GConf \
23 ${datadir}/glib-2.0/schemas \
24 ${datadir}/appdata \
25 ${datadir}/icons \
26"
27
28FILES:${PN}-doc += "${datadir}/devhelp"
29
30GNOMEBASEBUILDCLASS ??= "autotools"
31inherit ${GNOMEBASEBUILDCLASS} pkgconfig
32
33do_install:append() {
34 rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
35 rm -rf ${D}${localstatedir}/scrollkeeper/*
36 rm -f ${D}${datadir}/applications/*.cache
37}
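A worked example of the version-directory mapping (values hypothetical): gnome_verdir() simply drops the last version component, which is what drives SRC_URI above:

    # PV = "3.36.2"  ->  gnome_verdir("3.36.2") == "3.36"
    # SRC_URI expands to:
    #   ${GNOME_MIRROR}/${GNOMEBN}/3.36/${GNOMEBN}-3.36.2.tar.xz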
diff --git a/meta/classes/go-mod.bbclass b/meta/classes/go-mod.bbclass
deleted file mode 100644
index 927746a338..0000000000
--- a/meta/classes/go-mod.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Handle Go Modules support
8#
9# When using Go Modules, the current working directory MUST be at or below
10# the location of the 'go.mod' file when the go tool is used, and there is no
11# way to tell it to look elsewhere. It will automatically look upwards for the
12# file, but not downwards.
13#
14# To support this use case, we provide the `GO_WORKDIR` variable, which defaults
15# to `GO_IMPORT` but allows for easy override.
16#
17# Copyright 2020 (C) O.S. Systems Software LTDA.
18
19# The '-modcacherw' option ensures we have write access to the cached objects,
20# avoiding errors during the clean task as well as when removing TMPDIR.
21GOBUILDFLAGS:append = " -modcacherw"
22
23inherit go
24
25GO_WORKDIR ?= "${GO_IMPORT}"
26do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
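A minimal sketch of a module-based recipe (import path and URL hypothetical); with GO_WORKDIR defaulting to GO_IMPORT, do_compile then runs from the directory containing go.mod:

    GO_IMPORT = "example.com/hello"
    SRC_URI = "git://example.com/hello;branch=main;protocol=https"
    inherit go-mod
    # the go tool runs from ${B}/src/example.com/hello, where go.mod lives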
diff --git a/meta/classes/go-ptest.bbclass b/meta/classes/go-ptest.bbclass
deleted file mode 100644
index 54fcbb535d..0000000000
--- a/meta/classes/go-ptest.bbclass
+++ /dev/null
@@ -1,60 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit go ptest
8
9do_compile_ptest_base() {
10 export TMPDIR="${GOTMPDIR}"
11 rm -f ${B}/.go_compiled_tests.list
12 go_list_package_tests | while read pkg; do
13 cd ${B}/src/$pkg
14 ${GO} test ${GOPTESTBUILDFLAGS} $pkg
15 find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
16 sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
17 done
18 do_compile_ptest
19}
20
21do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
22
23go_make_ptest_wrapper() {
24 cat >${D}${PTEST_PATH}/run-ptest <<EOF
25#!/bin/sh
26RC=0
27run_test() (
28 cd "\$1"
29 ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
30 exit \$?)
31EOF
32
33}
34
35do_install_ptest_base() {
36 test -f "${B}/.go_compiled_tests.list" || exit 0
37 install -d ${D}${PTEST_PATH}
38 go_stage_testdata
39 go_make_ptest_wrapper
40 havetests=""
41 while read test; do
42 testdir=`dirname $test`
43 testprog=`basename $test`
44 install -d ${D}${PTEST_PATH}/$testdir
45 install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
46 echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
47 havetests="yes"
48 done < ${B}/.go_compiled_tests.list
49 if [ -n "$havetests" ]; then
50 echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
51 chmod +x ${D}${PTEST_PATH}/run-ptest
52 else
53 rm -rf ${D}${PTEST_PATH}
54 fi
55 do_install_ptest
56 chown -R root:root ${D}${PTEST_PATH}
57}
58
59INSANE_SKIP:${PN}-ptest += "ldflags"
60
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
deleted file mode 100644
index 6b9748406d..0000000000
--- a/meta/classes/go.bbclass
+++ /dev/null
@@ -1,170 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit goarch
8inherit linuxloader
9
10GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
11
12export GODEBUG = "gocachehash=1"
13
14GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go"
15GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
16GOROOT = "${STAGING_LIBDIR}/go"
17export GOROOT
18export GOROOT_FINAL = "${libdir}/go"
19export GOCACHE = "${B}/.cache"
20
21export GOARCH = "${TARGET_GOARCH}"
22export GOOS = "${TARGET_GOOS}"
23export GOHOSTARCH="${BUILD_GOARCH}"
24export GOHOSTOS="${BUILD_GOOS}"
25
26GOARM[export] = "0"
27GOARM:arm:class-target = "${TARGET_GOARM}"
28GOARM:arm:class-target[export] = "1"
29
30GO386[export] = "0"
31GO386:x86:class-target = "${TARGET_GO386}"
32GO386:x86:class-target[export] = "1"
33
34GOMIPS[export] = "0"
35GOMIPS:mips:class-target = "${TARGET_GOMIPS}"
36GOMIPS:mips:class-target[export] = "1"
37
38DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
39DEPENDS_GOLANG:class-native = "go-native"
40DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
41
42DEPENDS:append = " ${DEPENDS_GOLANG}"
43
44GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
45GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
46GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
47GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
48GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
49GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
50GO_LINKMODE ?= ""
51GO_LINKMODE:class-nativesdk = "--linkmode=external"
52GO_LINKMODE:class-native = "--linkmode=external"
53GO_EXTRA_LDFLAGS ?= ""
54GO_LINUXLOADER ?= "-I ${@get_linuxloader(d)}"
55# Use system loader. If uninative is used, the uninative loader will be patched automatically
56GO_LINUXLOADER:class-native = ""
57GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} ${GO_LINUXLOADER} ${GO_EXTRA_LDFLAGS} -extldflags '${GO_EXTLDFLAGS}'"'
58export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
59export GOPATH_OMIT_IN_ACTIONID ?= "1"
60export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
61export GOPTESTFLAGS ?= ""
62GOBUILDFLAGS:prepend:task-compile = "${GO_PARALLEL_BUILD} "
63
64export GO = "${HOST_PREFIX}go"
65GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
66GOTOOLDIR:class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
67export GOTOOLDIR
68
69export CGO_ENABLED ?= "1"
70export CGO_CFLAGS ?= "${CFLAGS}"
71export CGO_CPPFLAGS ?= "${CPPFLAGS}"
72export CGO_CXXFLAGS ?= "${CXXFLAGS}"
73export CGO_LDFLAGS ?= "${LDFLAGS}"
74
75GO_INSTALL ?= "${GO_IMPORT}/..."
76GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
77
78B = "${WORKDIR}/build"
79export GOPATH = "${B}"
80export GOENV = "off"
81export GOTMPDIR ?= "${WORKDIR}/build-tmp"
82GOTMPDIR[vardepvalue] = ""
83
84python go_do_unpack() {
85 src_uri = (d.getVar('SRC_URI') or "").split()
86 if len(src_uri) == 0:
87 return
88
89 fetcher = bb.fetch2.Fetch(src_uri, d)
90 for url in fetcher.urls:
91 if fetcher.ud[url].type == 'git':
92 if fetcher.ud[url].parm.get('destsuffix') is None:
93 s_dirname = os.path.basename(d.getVar('S'))
94 fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
95 fetcher.unpack(d.getVar('WORKDIR'))
96}
97
98go_list_packages() {
99 ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
100 egrep -v '${GO_INSTALL_FILTEROUT}'
101}
102
103go_list_package_tests() {
104 ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
105 grep -v '\[\]$' | \
106 egrep -v '${GO_INSTALL_FILTEROUT}' | \
107 awk '{ print $1 }'
108}
109
110go_do_configure() {
111 ln -snf ${S}/src ${B}/
112}
113do_configure[dirs] =+ "${GOTMPDIR}"
114
115go_do_compile() {
116 export TMPDIR="${GOTMPDIR}"
117 if [ -n "${GO_INSTALL}" ]; then
118 if [ -n "${GO_LINKSHARED}" ]; then
119 ${GO} install ${GOBUILDFLAGS} `go_list_packages`
120 rm -rf ${B}/bin
121 fi
122 ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
123 fi
124}
125do_compile[dirs] =+ "${GOTMPDIR}"
126do_compile[cleandirs] = "${B}/bin ${B}/pkg"
127
128go_do_install() {
129 install -d ${D}${libdir}/go/src/${GO_IMPORT}
130 tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
131 tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
132 tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
133 tar -C ${D}${libdir}/go --no-same-owner -xf -
134
135 if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
136 install -d ${D}${bindir}
137 install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
138 fi
139}
140
141go_stage_testdata() {
142 oldwd="$PWD"
143 cd ${S}/src
144 find ${GO_IMPORT} -depth -type d -name testdata | while read d; do
145 if echo "$d" | grep -q '/vendor/'; then
146 continue
147 fi
148 parent=`dirname $d`
149 install -d ${D}${PTEST_PATH}/$parent
150 cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/
151 done
152 cd "$oldwd"
153}
154
155EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
156
157FILES:${PN}-dev = "${libdir}/go/src"
158FILES:${PN}-staticdev = "${libdir}/go/pkg"
159
160INSANE_SKIP:${PN} += "ldflags"
161
162# Add -buildmode=pie to GOBUILDFLAGS to satisfy the "textrel" QA check, but
163# mips and riscv32 don't support -buildmode=pie, so skip the QA check for
164# those architectures and their variants.
165python() {
166 if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
167 d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel")
168 else:
169 d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
170}
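For illustration, a minimal recipe built with this class might look like the following sketch (names and URL hypothetical):

    GO_IMPORT = "example.com/hello"
    SRC_URI = "git://example.com/hello;branch=main;protocol=https"
    inherit go
    # go_do_unpack places the sources under ${S}/src/${GO_IMPORT},
    # go_do_compile runs '${GO} install' over go_list_packages, and
    # go_do_install ships sources in ${PN}-dev and pkg/ in ${PN}-staticdev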
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
deleted file mode 100644
index 61ead30a63..0000000000
--- a/meta/classes/goarch.bbclass
+++ /dev/null
@@ -1,122 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}"
8BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
9BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
10HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
11HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
12HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
13HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
14HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
15HOST_GOARM:class-native = "7"
16HOST_GO386:class-native = "sse2"
17HOST_GOMIPS:class-native = "hardfloat"
18HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
19TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
20TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
21TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
22TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
23TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
24TARGET_GOARM:class-native = "7"
25TARGET_GO386:class-native = "sse2"
26TARGET_GOMIPS:class-native = "hardfloat"
27TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
28GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
29
30# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
31# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
32BASE_GOARM = ''
33BASE_GOARM:armv7ve = '7'
34BASE_GOARM:armv7a = '7'
35BASE_GOARM:armv6 = '6'
36BASE_GOARM:armv5 = '5'
37
38# Go supports dynamic linking on a limited set of architectures.
39# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
40GO_DYNLINK = ""
41GO_DYNLINK:arm ?= "1"
42GO_DYNLINK:aarch64 ?= "1"
43GO_DYNLINK:x86 ?= "1"
44GO_DYNLINK:x86-64 ?= "1"
45GO_DYNLINK:powerpc64 ?= "1"
46GO_DYNLINK:powerpc64le ?= "1"
47GO_DYNLINK:class-native ?= ""
48GO_DYNLINK:class-nativesdk = ""
49
50# define here because everybody inherits this class
51#
52COMPATIBLE_HOST:linux-gnux32 = "null"
53COMPATIBLE_HOST:linux-muslx32 = "null"
54COMPATIBLE_HOST:powerpc = "null"
55COMPATIBLE_HOST:powerpc64 = "null"
56COMPATIBLE_HOST:mipsarchn32 = "null"
57
58ARM_INSTRUCTION_SET:armv4 = "arm"
59ARM_INSTRUCTION_SET:armv5 = "arm"
60ARM_INSTRUCTION_SET:armv6 = "arm"
61
62TUNE_CCARGS:remove = "-march=mips32r2"
63SECURITY_NOPIE_CFLAGS ??= ""
64
65# go can't be built with ccache:
66# gcc: fatal error: no input files
67CCACHE_DISABLE ?= "1"
68
69def go_map_arch(a, d):
70 import re
71 if re.match('i.86', a):
72 return '386'
73 elif a == 'x86_64':
74 return 'amd64'
75 elif re.match('arm.*', a):
76 return 'arm'
77 elif re.match('aarch64.*', a):
78 return 'arm64'
79 elif re.match('mips64el.*', a):
80 return 'mips64le'
81 elif re.match('mips64.*', a):
82 return 'mips64'
83 elif a == 'mips':
84 return 'mips'
85 elif a == 'mipsel':
86 return 'mipsle'
87 elif re.match('p(pc|owerpc)(64le)', a):
88 return 'ppc64le'
89 elif re.match('p(pc|owerpc)(64)', a):
90 return 'ppc64'
91 elif a == 'riscv64':
92 return 'riscv64'
93 else:
94 raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
95
96def go_map_arm(a, d):
97 if a.startswith("arm"):
98 return d.getVar('BASE_GOARM')
99 return ''
100
101def go_map_386(a, f, d):
102 import re
103 if re.match('i.86', a):
104 if ('core2' in f) or ('corei7' in f):
105 return 'sse2'
106 else:
107 return 'softfloat'
108 return ''
109
110def go_map_mips(a, f, d):
111 import re
112 if a == 'mips' or a == 'mipsel':
113 if 'fpu-hard' in f:
114 return 'hardfloat'
115 else:
116 return 'softfloat'
117 return ''
118
119def go_map_os(o, d):
120 if o.startswith('linux'):
121 return 'linux'
122 return o
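A few worked mappings, for illustration of the functions above (inputs are example OE architecture/OS strings):

    # go_map_arch('x86_64', d)              -> 'amd64'
    # go_map_arch('aarch64', d)             -> 'arm64'
    # go_map_arch('mipsel', d)              -> 'mipsle'
    # go_map_386('i686', 'core2 ...', d)    -> 'sse2'
    # go_map_os('linux-gnueabi', d)         -> 'linux'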
diff --git a/meta/classes/gobject-introspection-data.bbclass b/meta/classes/gobject-introspection-data.bbclass
deleted file mode 100644
index 7f522a1ed3..0000000000
--- a/meta/classes/gobject-introspection-data.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This variable is set to True if gobject-introspection-data is in
8# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
9#
10# It should be used in recipes to determine whether introspection data should be built,
11# so that qemu use can be avoided when necessary.
12GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
13 bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
14
15do_compile:prepend() {
16 # This prevents g-ir-scanner from writing cache data to $HOME
17 export GI_SCANNER_DISABLE_CACHE=1
18}
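A sketch of how GI_DATA_ENABLED is typically consumed in a recipe (the option name here is hypothetical; real projects vary):

    # only build introspection data when the feature combination allows it
    EXTRA_OEMESON += "${@'-Dintrospection=enabled' if d.getVar('GI_DATA_ENABLED') == 'True' else '-Dintrospection=disabled'}"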
diff --git a/meta/classes/gobject-introspection.bbclass b/meta/classes/gobject-introspection.bbclass
deleted file mode 100644
index 0c7b7d200a..0000000000
--- a/meta/classes/gobject-introspection.bbclass
+++ /dev/null
@@ -1,61 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Inherit this class in recipes to enable building their introspection files
8
9# python3native is inherited to prevent the introspection tools being run with
10# the host's Python 3 (they need to be run with the native Python 3)
11#
12# This also sets up autoconf-based recipes to build introspection data (or not),
13# depending on distro and machine features (see gobject-introspection-data class).
14inherit python3native gobject-introspection-data
15
16# meson: default option name to enable/disable introspection. This matches most
17# projects' configuration. If in doubt, check meson_options.txt in the
18# project's source tree.
19GIR_MESON_OPTION ?= 'introspection'
20GIR_MESON_ENABLE_FLAG ?= 'true'
21GIR_MESON_DISABLE_FLAG ?= 'false'
22
23# Define g-i options such that they can be disabled completely when GIR_MESON_OPTION is empty
24GIRMESONTARGET = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
25GIRMESONBUILD = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
26# Auto enable/disable based on GI_DATA_ENABLED
27EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
28EXTRA_OEMESON:prepend:class-target = "${@['', '${GIRMESONTARGET}'][d.getVar('GIR_MESON_OPTION') != '']}"
29# When building native recipes, disable introspection, as it is not necessary,
30# pulls in additional dependencies, and makes build times longer
31EXTRA_OECONF:prepend:class-native = "--disable-introspection "
32EXTRA_OECONF:prepend:class-nativesdk = "--disable-introspection "
33EXTRA_OEMESON:prepend:class-native = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
34EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
35
36# Generating introspection data depends on a combination of native and target
37# introspection tools, and qemu to run the target tools.
38DEPENDS:append:class-target = " gobject-introspection gobject-introspection-native qemu-native"
39
40# Even though introspection is disabled on -native, gobject-introspection package is still
41# needed for m4 macros.
42DEPENDS:append:class-native = " gobject-introspection-native"
43DEPENDS:append:class-nativesdk = " gobject-introspection-native"
44
45# This is used by introspection tools to find .gir includes
46export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
47
48do_configure:prepend:class-target () {
49 # introspection.m4 pre-packaged with upstream tarballs does not yet
50 # have our fixes
51 mkdir -p ${S}/m4
52 cp ${STAGING_DIR_TARGET}/${datadir}/aclocal/introspection.m4 ${S}/m4
53}
54
55# .typelib files are needed at runtime and so they go to the main package (so
56# they'll be together with libraries they support).
57FILES:${PN}:append = " ${libdir}/girepository-*/*.typelib"
58
59# .gir files go to the dev package, as they're needed for developing (but not
60# for running) things that depend on introspection.
61FILES:${PN}-dev:append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
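Usage sketch (fragment and option name hypothetical): most recipes only need the inherit; the meson knob is overridden when a project names its option differently, and setting it empty drops the -D argument entirely (see GIRMESONTARGET above):

    inherit gobject-introspection
    # project-specific option name, if it differs from the 'introspection' default
    GIR_MESON_OPTION = "gir"
    # or disable the meson handling completely:
    #GIR_MESON_OPTION = ""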
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
deleted file mode 100644
index 52e85a3bb0..0000000000
--- a/meta/classes/grub-efi-cfg.bbclass
+++ /dev/null
@@ -1,122 +0,0 @@
1# grub-efi.bbclass
2# Copyright (c) 2011, Intel Corporation.
3#
4# SPDX-License-Identifier: MIT
5
6# Provide grub-efi specific functions for building bootable images.
7
8# External variables
9# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
10# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
11# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
12# ${LABELS} - a list of targets for the automatic config
13# ${APPEND} - an override list of append strings for each label
14# ${GRUB_OPTS} - additional options to add to the config, ';'-delimited (optional)
15# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
16# ${GRUB_ROOT} - grub's root device.
17
18GRUB_SERIAL ?= "console=ttyS0,115200"
19GRUB_CFG_VM = "${S}/grub_vm.cfg"
20GRUB_CFG_LIVE = "${S}/grub_live.cfg"
21GRUB_TIMEOUT ?= "10"
22#FIXME: build this from the machine config
23GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
24
25GRUB_ROOT ?= "${ROOT}"
26APPEND ?= ""
27
28# Uses MACHINE specific KERNEL_IMAGETYPE
29PACKAGE_ARCH = "${MACHINE_ARCH}"
30
31# Need UUID utility code.
32inherit fs-uuid
33
34python build_efi_cfg() {
35 import sys
36
37 workdir = d.getVar('WORKDIR')
38 if not workdir:
39 bb.error("WORKDIR not defined, unable to package")
40 return
41
42 gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
43
44 labels = d.getVar('LABELS')
45 if not labels:
46 bb.debug(1, "LABELS not defined, nothing to do")
47 return
48
49 if labels == []:
50 bb.debug(1, "No labels, nothing to do")
51 return
52
53 cfile = d.getVar('GRUB_CFG')
54 if not cfile:
55 bb.fatal('Unable to read GRUB_CFG')
56
57 try:
58 cfgfile = open(cfile, 'w')
59 except OSError:
60 bb.fatal('Unable to open %s' % cfile)
61
62 cfgfile.write('# Automatically created by OE\n')
63
64 opts = d.getVar('GRUB_OPTS')
65 if opts:
66 for opt in opts.split(';'):
67 cfgfile.write('%s\n' % opt)
68
69 cfgfile.write('default=%s\n' % (labels.split()[0]))
70
71 timeout = d.getVar('GRUB_TIMEOUT')
72 if timeout:
73 cfgfile.write('timeout=%s\n' % timeout)
74 else:
75 cfgfile.write('timeout=50\n')
76
77 root = d.getVar('GRUB_ROOT')
78 if not root:
79 bb.fatal('GRUB_ROOT not defined')
80
81 if gfxserial == "1":
82 btypes = [ [ " graphics console", "" ],
83 [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
84 else:
85 btypes = [ [ "", "" ] ]
86
87 for label in labels.split():
88 localdata = d.createCopy()
89
90 overrides = localdata.getVar('OVERRIDES')
91 if not overrides:
92 bb.fatal('OVERRIDES not defined')
93
94 localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)
95
96 for btype in btypes:
97 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
98 lb = label
99 if label == "install":
100 lb = "install-efi"
101 kernel = localdata.getVar('KERNEL_IMAGETYPE')
102 cfgfile.write('linux /%s LABEL=%s' % (kernel, lb))
103
104 cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
105
106 append = localdata.getVar('APPEND')
107 initrd = localdata.getVar('INITRD')
108
109 if append:
110 append = replace_rootfs_uuid(d, append)
111 cfgfile.write(' %s' % (append))
112
113 cfgfile.write(' %s' % btype[1])
114 cfgfile.write('\n')
115
116 if initrd:
117 cfgfile.write('initrd /initrd')
118 cfgfile.write('\n}\n')
119
120 cfgfile.close()
121}
122build_efi_cfg[vardepsexclude] += "OVERRIDES"
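To illustrate what build_efi_cfg emits, assume (hypothetically) LABELS = "boot", GRUB_ROOT = "root=/dev/sda2", KERNEL_IMAGETYPE = "bzImage", and no INITRD or APPEND; the generated GRUB_CFG then looks roughly like:

    # Automatically created by OE
    serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
    default=boot
    timeout=10

    menuentry 'boot'{
    linux /bzImage LABEL=boot root=/dev/sda2
    }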
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
deleted file mode 100644
index 4afd12195f..0000000000
--- a/meta/classes/grub-efi.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit grub-efi-cfg
8require conf/image-uefi.conf
9
10efi_populate() {
11 efi_populate_common "$1" grub-efi
12
13 install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
14}
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
deleted file mode 100644
index adb027ea0a..0000000000
--- a/meta/classes/gsettings.bbclass
+++ /dev/null
@@ -1,48 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# A bbclass to handle installed GSettings (glib) schemas, updating the compiled
8# form on package install and removal.
9#
10# The compiled schemas are platform-agnostic, so we can depend on
11# glib-2.0-native for the native tool and run the postinst script when the
12# rootfs builds to save a little time on first boot.
13
14# TODO use a trigger so that this runs once per package operation run
15
16GSETTINGS_PACKAGE ?= "${PN}"
17
18python __anonymous() {
19 pkg = d.getVar("GSETTINGS_PACKAGE")
20 if pkg:
21 d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
22 d.appendVar("RDEPENDS:" + pkg, " ${MLPREFIX}glib-2.0-utils")
23 d.appendVar("FILES:" + pkg, " ${datadir}/glib-2.0/schemas")
24}
25
26gsettings_postinstrm () {
27 glib-compile-schemas $D${datadir}/glib-2.0/schemas
28}
29
30python populate_packages:append () {
31 pkg = d.getVar('GSETTINGS_PACKAGE')
32 if pkg:
33 bb.note("adding gsettings postinst scripts to %s" % pkg)
34
35 postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
36 if not postinst:
37 postinst = '#!/bin/sh\n'
38 postinst += d.getVar('gsettings_postinstrm')
39 d.setVar('pkg_postinst:%s' % pkg, postinst)
40
41 bb.note("adding gsettings postrm scripts to %s" % pkg)
42
43 postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
44 if not postrm:
45 postrm = '#!/bin/sh\n'
46 postrm += d.getVar('gsettings_postinstrm')
47 d.setVar('pkg_postrm:%s' % pkg, postrm)
48}
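Usage sketch (package name hypothetical): the default covers schemas installed into ${PN}; a recipe that splits schemas out only needs to point the class at the right package:

    inherit gsettings
    # schemas are packaged separately in this (hypothetical) recipe
    GSETTINGS_PACKAGE = "${PN}-data"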
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
deleted file mode 100644
index 68fa2cc745..0000000000
--- a/meta/classes/gtk-doc.bbclass
+++ /dev/null
@@ -1,89 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Helper class to pull in the right gtk-doc dependencies and configure
8# gtk-doc to enable or disable documentation building (which requires the
9# use of usermode qemu).
10
11# This variable is set to True if api-documentation is in
12# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
13#
14# It should be used in recipes to determine whether gtk-doc based documentation should be built,
15# so that qemu use can be avoided when necessary.
16GTKDOC_ENABLED:class-native = "False"
17GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
18 bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
19
20# meson: default option name to enable/disable gtk-doc. This matches most
21# projects' configuration. If in doubt, check meson_options.txt in the
22# project's source tree.
23GTKDOC_MESON_OPTION ?= 'docs'
24GTKDOC_MESON_ENABLE_FLAG ?= 'true'
25GTKDOC_MESON_DISABLE_FLAG ?= 'false'
26
27# Auto enable/disable based on GTKDOC_ENABLED
28EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
29 '--disable-gtk-doc', d)} "
30EXTRA_OEMESON:prepend:class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
31
32# When building native recipes, disable gtkdoc, as it is not necessary,
33# pulls in additional dependencies, and makes build times longer
34EXTRA_OECONF:prepend:class-native = "--disable-gtk-doc "
35EXTRA_OECONF:prepend:class-nativesdk = "--disable-gtk-doc "
36EXTRA_OEMESON:prepend:class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
37EXTRA_OEMESON:prepend:class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
38
39# Even though gtkdoc is disabled on -native, gtk-doc package is still
40# needed for m4 macros.
41DEPENDS:append = " gtk-doc-native"
42
43# The documentation directory, where the infrastructure will be copied.
44# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
45GTKDOC_DOCDIR ?= "${S}"
46
47export STAGING_DIR_HOST
48
49inherit python3native pkgconfig qemu
50DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
51
52do_configure:prepend () {
53 # Need to use ||true as this is only needed if configure.ac both exists
54 # and uses GTK_DOC_CHECK.
55 gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
56}
57
58do_compile:prepend:class-target () {
59 if [ ${GTKDOC_ENABLED} = True ]; then
60 # Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
61 # can run target helper binaries through that.
62 qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
63 cat > ${B}/gtkdoc-qemuwrapper << EOF
64#!/bin/sh
65# Use a modules directory which doesn't exist so we don't load random things
66# which may then get deleted (or their dependencies) and potentially segfault
67export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
68
69GIR_EXTRA_LIBS_PATH=\`find ${B} -name '*.so' -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
70GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
71
72# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
73unset LD_LIBRARY_PATH
74
75if [ -d ".libs" ]; then
76 $qemu_binary ".libs/\$@"
77else
78 $qemu_binary "\$@"
79fi
80
81if [ \$? -ne 0 ]; then
82 echo "If the above error message is about missing .so libraries, then setting up GIR_EXTRA_LIBS_PATH in the recipe should help."
83 echo "(typically like this: GIR_EXTRA_LIBS_PATH=\"$""{B}/something/.libs\" )"
84 exit 1
85fi
86EOF
87 chmod +x ${B}/gtkdoc-qemuwrapper
88 fi
89}
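Usage sketch (fragment hypothetical), mirroring the meson knobs defined above:

    inherit gtk-doc
    # this project names its meson option "gtk_doc" instead of the "docs" default
    GTKDOC_MESON_OPTION = "gtk_doc"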
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
deleted file mode 100644
index 17c7eb7a33..0000000000
--- a/meta/classes/gtk-icon-cache.bbclass
+++ /dev/null
@@ -1,95 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7FILES:${PN} += "${datadir}/icons/hicolor"
8
9GTKIC_VERSION ??= '3'
10
11GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
12GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0.0' }"
13
14# gtk+3/gtk4 require GTK3DISTROFEATURES; depending on them makes all
15# recipes that inherit this class require GTK3DISTROFEATURES
16inherit features_check
17ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
18
19DEPENDS +=" ${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \
20 ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \
21 ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \
22 ${GTKPN}-native \
23"
24
25PACKAGE_WRITE_DEPS += "${GTKPN}-native gdk-pixbuf-native"
26
27gtk_icon_cache_postinst() {
28if [ "x$D" != "x" ]; then
29 $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
30 mlprefix=${MLPREFIX} \
31 libdir_native=${libdir_native}
32else
33
34 # Update the pixbuf loaders in case they haven't been registered yet
35 ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
36
37 for icondir in /usr/share/icons/* ; do
38 if [ -d $icondir ] ; then
39 ${GTKIC_CMD} -fqt $icondir
40 fi
41 done
42fi
43}
44
45gtk_icon_cache_postrm() {
46if [ "x$D" != "x" ]; then
47 $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
48 mlprefix=${MLPREFIX} \
49 libdir=${libdir}
50else
51 for icondir in /usr/share/icons/* ; do
52 if [ -d $icondir ] ; then
53 ${GTKIC_CMD} -qt $icondir
54 fi
55 done
56fi
57}
58
59python populate_packages:append () {
60 packages = d.getVar('PACKAGES').split()
61 pkgdest = d.getVar('PKGDEST')
62
63 for pkg in packages:
64 icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir'))
65 if not os.path.exists(icon_dir):
66 continue
67
68 bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
69 rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
70 d.appendVar('RDEPENDS:%s' % pkg, rdepends)
71
72 # gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3/gtk4
73 bb.note("adding gdk-pixbuf dependency to %s" % pkg)
74 rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
75 d.appendVar('RDEPENDS:%s' % pkg, rdepends)
76
77 bb.note("adding %s dependency to %s" % (d.getVar('GTKPN'), pkg))
78 rdepends = ' ' + d.getVar('MLPREFIX', False) + d.getVar('GTKPN')
79 d.appendVar('RDEPENDS:%s' % pkg, rdepends)
80
81 bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
82
83 postinst = d.getVar('pkg_postinst:%s' % pkg)
84 if not postinst:
85 postinst = '#!/bin/sh\n'
86 postinst += d.getVar('gtk_icon_cache_postinst')
87 d.setVar('pkg_postinst:%s' % pkg, postinst)
88
89 postrm = d.getVar('pkg_postrm:%s' % pkg)
90 if not postrm:
91 postrm = '#!/bin/sh\n'
92 postrm += d.getVar('gtk_icon_cache_postrm')
93 d.setVar('pkg_postrm:%s' % pkg, postrm)
94}
95
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
deleted file mode 100644
index 8fbe1dd1fb..0000000000
--- a/meta/classes/gtk-immodules-cache.bbclass
+++ /dev/null
@@ -1,82 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class will update the inputmethod module cache for virtual keyboards
8#
9# Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the inputmethod modules
10
11PACKAGE_WRITE_DEPS += "qemu-native"
12
13inherit qemu
14
15GTKIMMODULES_PACKAGES ?= "${PN}"
16
17gtk_immodule_cache_postinst() {
18if [ "x$D" != "x" ]; then
19 $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
20 mlprefix=${MLPREFIX} \
21 binprefix=${MLPREFIX} \
22 libdir=${libdir} \
23 libexecdir=${libexecdir} \
24 base_libdir=${base_libdir} \
25 bindir=${bindir}
26else
27 if [ ! -z "`which gtk-query-immodules-2.0`" ]; then
28 gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
29 fi
30 if [ ! -z "`which gtk-query-immodules-3.0`" ]; then
31 mkdir -p ${libdir}/gtk-3.0/3.0.0
32 gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
33 fi
34fi
35}
36
37gtk_immodule_cache_postrm() {
38if [ "x$D" != "x" ]; then
39 $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
40 mlprefix=${MLPREFIX} \
41 binprefix=${MLPREFIX} \
42 libdir=${libdir} \
43 libexecdir=${libexecdir} \
44 base_libdir=${base_libdir} \
45 bindir=${bindir}
46else
47 if [ ! -z "`which gtk-query-immodules-2.0`" ]; then
48 gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
49 fi
50 if [ ! -z "`which gtk-query-immodules-3.0`" ]; then
51 gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
52 fi
53fi
54}
55
56python populate_packages:append () {
57 gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
58
59 for pkg in gtkimmodules_pkgs:
60 bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
61
62 postinst = d.getVar('pkg_postinst:%s' % pkg)
63 if not postinst:
64 postinst = '#!/bin/sh\n'
65 postinst += d.getVar('gtk_immodule_cache_postinst')
66 d.setVar('pkg_postinst:%s' % pkg, postinst)
67
68 postrm = d.getVar('pkg_postrm:%s' % pkg)
69 if not postrm:
70 postrm = '#!/bin/sh\n'
71 postrm += d.getVar('gtk_immodule_cache_postrm')
72 d.setVar('pkg_postrm:%s' % pkg, postrm)
73}
74
75python __anonymous() {
76 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
77 gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
78 if not gtkimmodules_check:
79 bb_filename = d.getVar('FILE', False)
80 bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
81}
82
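Usage sketch (package names hypothetical); the class expects GTKIMMODULES_PACKAGES to list the packages that ship input-method modules, as enforced by the anonymous function above:

    inherit gtk-immodules-cache
    GTKIMMODULES_PACKAGES = "${PN}-gtk2 ${PN}-gtk3"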
diff --git a/meta/classes/image-artifact-names.bbclass b/meta/classes/image-artifact-names.bbclass
deleted file mode 100644
index 5c4e746b90..0000000000
--- a/meta/classes/image-artifact-names.bbclass
+++ /dev/null
@@ -1,28 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7##################################################################
8# Specific image creation and rootfs population info.
9##################################################################
10
11IMAGE_BASENAME ?= "${PN}"
12IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
13IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
14IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
15IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
16
17# IMAGE_NAME is the base name for everything produced when building images.
18# The actual image that contains the rootfs has an additional suffix (.rootfs
19# by default) followed by additional suffixes which describe the format (.ext4,
20# .ext4.xz, etc.).
21IMAGE_NAME_SUFFIX ??= ".rootfs"
22
23python () {
24 if bb.data.inherits_class('deploy', d) and d.getVar("IMAGE_VERSION_SUFFIX") == "-${DATETIME}":
25 import datetime
26 d.setVar("IMAGE_VERSION_SUFFIX", "-" + datetime.datetime.fromtimestamp(int(d.getVar("SOURCE_DATE_EPOCH")), datetime.timezone.utc).strftime('%Y%m%d%H%M%S'))
27 d.setVarFlag("IMAGE_VERSION_SUFFIX", "vardepvalue", "")
28}
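A worked expansion, with hypothetical values, of the names defined above:

    # IMAGE_BASENAME = "core-image-minimal", MACHINE = "qemux86-64",
    # DATETIME = "20240101120000"
    # IMAGE_NAME      -> core-image-minimal-qemux86-64-20240101120000
    # IMAGE_LINK_NAME -> core-image-minimal-qemux86-64
    # rootfs artifact -> core-image-minimal-qemux86-64-20240101120000.rootfs.ext4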
diff --git a/meta/classes/image-combined-dbg.bbclass b/meta/classes/image-combined-dbg.bbclass
deleted file mode 100644
index dcf1968538..0000000000
--- a/meta/classes/image-combined-dbg.bbclass
+++ /dev/null
@@ -1,15 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image; "
8
9combine_dbg_image () {
10 if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
11 # copy target files into -dbg rootfs, so it can be used for
12 # debug purposes directly
13 tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf -
14 fi
15}
diff --git a/meta/classes/image-container.bbclass b/meta/classes/image-container.bbclass
deleted file mode 100644
index d24b030453..0000000000
--- a/meta/classes/image-container.bbclass
+++ /dev/null
@@ -1,27 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7ROOTFS_BOOTSTRAP_INSTALL = ""
8IMAGE_TYPES_MASKED += "container"
9IMAGE_TYPEDEP:container = "tar.bz2"
10
11python __anonymous() {
12 if "container" in d.getVar("IMAGE_FSTYPES") and \
13 d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
14 "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"):
15 msg = '"container" is in IMAGE_FSTYPES, but ' \
16 'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \
17 'Unless a particular kernel is needed, using linux-dummy will ' \
18 'prevent a kernel from being built, which can reduce ' \
19 'build times. If you don\'t want to use "linux-dummy", set ' \
20 '"IMAGE_CONTAINER_NO_DUMMY" to "1".'
21
22 # Raising skip recipe was Paul's clever idea. It causes the error to
23 # only be shown for the recipes actually requested to build, rather
24 # than bb.fatal which would appear for all recipes inheriting the
25 # class.
26 raise bb.parse.SkipRecipe(msg)
27}
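A configuration sketch for building a container image without a real kernel (these are the settings the message above suggests):

    IMAGE_FSTYPES += "container"
    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
    # or, to keep building a real kernel anyway:
    #IMAGE_CONTAINER_NO_DUMMY = "1"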
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
deleted file mode 100644
index 1034acc49e..0000000000
--- a/meta/classes/image-live.bbclass
+++ /dev/null
@@ -1,265 +0,0 @@
1# Copyright (C) 2004, Advanced Micro Devices, Inc.
2#
3# SPDX-License-Identifier: MIT
4
5# Creates a bootable image using syslinux, your kernel and an optional
6# initrd
7
8#
9# End result is two things:
10#
11# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
12# an initrd and a rootfs image. These can be written to hard disks directly and
13# also booted from USB flash disks (write them there with dd).
14#
15# 2. A CD .iso image
16
17# The boot process is that the initrd boots and processes the label that was
18# selected in syslinux; actions based on the label are then performed
19# (e.g. installing to a hard disk).
20
21# External variables (also used by syslinux.bbclass)
22# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
23# ${HDDIMG_ID} - FAT image volume-id
24# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
25
26inherit live-vm-common image-artifact-names
27
28do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
29 mtools-native:do_populate_sysroot \
30 cdrtools-native:do_populate_sysroot \
31 virtual/kernel:do_deploy \
32 ${MLPREFIX}syslinux:do_populate_sysroot \
33 syslinux-native:do_populate_sysroot \
34 ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
35 "
36
37
38LABELS_LIVE ?= "boot install"
39ROOT_LIVE ?= "root=/dev/ram0"
40INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
41INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
42
43LIVE_ROOTFS_TYPE ?= "ext4"
44ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
45
46IMAGE_TYPEDEP:live = "${LIVE_ROOTFS_TYPE}"
47IMAGE_TYPEDEP:iso = "${LIVE_ROOTFS_TYPE}"
48IMAGE_TYPEDEP:hddimg = "${LIVE_ROOTFS_TYPE}"
49IMAGE_TYPES_MASKED += "live hddimg iso"
50
51python() {
52 image_b = d.getVar('IMAGE_BASENAME')
53 initrd_i = d.getVar('INITRD_IMAGE_LIVE')
54 if image_b == initrd_i:
55 bb.error('INITRD_IMAGE_LIVE %s cannot itself be built as a live, hddimg or iso image.' % initrd_i)
56 bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
57 elif initrd_i:
58 d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i)
59}
60
61HDDDIR = "${S}/hddimg"
62ISODIR = "${S}/iso"
63EFIIMGDIR = "${S}/efi_img"
64COMPACT_ISODIR = "${S}/iso.z"
65
66ISOLINUXDIR ?= "/isolinux"
67ISO_BOOTIMG = "isolinux/isolinux.bin"
68ISO_BOOTCAT = "isolinux/boot.cat"
69MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
70
71BOOTIMG_VOLUME_ID ?= "boot"
72BOOTIMG_EXTRA_SPACE ?= "512"
73
74populate_live() {
75 populate_kernel $1
76 if [ -s "${ROOTFS}" ]; then
77 install -m 0644 ${ROOTFS} $1/rootfs.img
78 fi
79}
80
81build_iso() {
82 # Only create an ISO if we have an INITRD and the live or iso image type was selected
83 if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then
84 bbnote "ISO image will not be created."
85 return
86 fi
87 # ${INITRD} is a list of multiple filesystem images
88 for fs in ${INITRD}
89 do
90 if [ ! -s "$fs" ]; then
91 bbwarn "ISO image will not be created. $fs is invalid."
92 return
93 fi
94 done
95
96 populate_live ${ISODIR}
97
98 if [ "${PCBIOS}" = "1" ]; then
99 syslinux_iso_populate ${ISODIR}
100 fi
101 if [ "${EFI}" = "1" ]; then
102 efi_iso_populate ${ISODIR}
103 build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
104 fi
105
106 # EFI only
107 if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
108 # Work around a bug in isohybrid where it requires isolinux.bin
109 # in the boot catalog, even though it is not used
110 mkdir -p ${ISODIR}/${ISOLINUXDIR}
111 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
112 fi
113
114 # We used to have support for zisofs; this is a relic of that
115 mkisofs_compress_opts="-r"
116
117 # Check the size of ${ISODIR}/rootfs.img and use mkisofs -iso-level 3
118 # when it exceeds 3.8GB; the specification allows 4GB - 1 byte, but we
119 # need to leave some space for other files.
120 mkisofs_iso_level=""
121
122 if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
123 rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
124 # 4080218931 = 3.8 * 1024 * 1024 * 1024
125 if [ $rootfs_img_size -gt 4080218931 ]; then
126 bbnote "${ISODIR}/rootfs.img exceeds 3.8GB, using '-iso-level 3' for mkisofs"
127 mkisofs_iso_level="-iso-level 3"
128 fi
129 fi
130
131 if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
132 # PCBIOS only media
133 mkisofs -V ${BOOTIMG_VOLUME_ID} \
134 -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
135 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
136 $mkisofs_compress_opts \
137 ${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
138 else
139 # EFI only OR EFI+PCBIOS
140 mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
141 -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
142 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
143 $mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
144 -eltorito-alt-boot -eltorito-platform efi \
145 -b efi.img -no-emul-boot \
146 ${ISODIR}
147 isohybrid_args="-u"
148 fi
149
150 isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
151}
152
153build_fat_img() {
154 FATSOURCEDIR=$1
155 FATIMG=$2
156
157 # Calculate the size required for the final image including the
158 # data and filesystem overhead.
159 # Sectors: 512 bytes
160 # Blocks: 1024 bytes
161
162 # Determine the sector count just for the data
163 SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
164
165 # Account for the filesystem overhead. This includes directory
166 # entries in the clusters as well as the FAT itself.
167 # Assumptions:
168 # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
169 # padding will be minimal on those smaller images and not
170 # worth the logic here to calculate the smaller FAT sizes)
171 # < 16 entries per directory
172 # 8.3 filenames only
173
174 # 32 bytes per dir entry
175 DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
176 # 32 bytes for every end-of-directory dir entry
177 DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
178 # 4 bytes per FAT entry per sector of data
179 FAT_BYTES=$(expr $SECTORS \* 4)
180 # 4 bytes per FAT entry per end-of-cluster list
181 FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
182
183 # Use a ceiling function to determine FS overhead in sectors
184 DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
185 # There are two FATs on the image
186 FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
187 SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
188
189 # Determine the final size in blocks accounting for some padding
190 BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
191
192 # mkdosfs will sometimes use FAT16 when it is not appropriate,
193 # resulting in a boot failure from SYSLINUX. Use FAT32 for
194 # images larger than 512MB, otherwise let mkdosfs decide.
195 if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
196 FATSIZE="-F 32"
197 fi
198
199 # mkdosfs will fail if ${FATIMG} exists. Since we are creating a
200 # new image, it is safe to delete any previous image.
201 if [ -e ${FATIMG} ]; then
202 rm ${FATIMG}
203 fi
204
205 if [ -z "${HDDIMG_ID}" ]; then
206 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
207 ${BLOCKS}
208 else
209 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
210 ${BLOCKS} -i ${HDDIMG_ID}
211 fi
212
213 # Copy FATSOURCEDIR recursively into the image file directly
214 mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
215}
216
217build_hddimg() {
218 # Create an HDD image
219 if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then
220 populate_live ${HDDDIR}
221
222 if [ "${PCBIOS}" = "1" ]; then
223 syslinux_hddimg_populate ${HDDDIR}
224 fi
225 if [ "${EFI}" = "1" ]; then
226 efi_hddimg_populate ${HDDDIR}
227 fi
228
229 # Check the size of ${HDDDIR}/rootfs.img and error out if it
230 # exceeds 4GB, the maximum size of a single file on a FAT filesystem.
231 if [ -f ${HDDDIR}/rootfs.img ]; then
232 rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
233 max_size=`expr 4 \* 1024 \* 1024 \* 1024`
234 if [ $rootfs_img_size -ge $max_size ]; then
235 bberror "${HDDDIR}/rootfs.img rootfs size is greater than or equal to 4GB,"
236 bberror "and this doesn't work on a FAT filesystem. You can either:"
237 bberror "1) Reduce the size of rootfs.img, or,"
238 bbfatal "2) Use wic, vmdk, vhd, vhdx or vdi instead of hddimg\n"
239 fi
240 fi
241
242 build_fat_img ${HDDDIR} ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
243
244 if [ "${PCBIOS}" = "1" ]; then
245 syslinux_hddimg_install
246 fi
247
248 chmod 644 ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
249 fi
250}
251
252python do_bootimg() {
253 set_live_vm_vars(d, 'LIVE')
254 if d.getVar("PCBIOS") == "1":
255 bb.build.exec_func('build_syslinux_cfg', d)
256 if d.getVar("EFI") == "1":
257 bb.build.exec_func('build_efi_cfg', d)
258 bb.build.exec_func('build_hddimg', d)
259 bb.build.exec_func('build_iso', d)
260 bb.build.exec_func('create_symlinks', d)
261}
262do_bootimg[subimages] = "hddimg iso"
263do_bootimg[imgsuffix] = "."
264
265addtask bootimg before do_image_complete after do_rootfs
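To make the FAT sizing in build_fat_img concrete, here is a worked example with hypothetical numbers (100000 KiB of data, 58 files and 2 directories under FATSOURCEDIR):

    # SECTORS     = 100000 * 2                    = 200000
    # DIR_BYTES   = (58 + 2) * 32 + 2 * 32        = 1984
    # DIR_SECTORS = (1984 + 511) / 512            = 4
    # FAT_BYTES   = 200000 * 4 + 2 * 4            = 800008
    # FAT_SECTORS = ((800008 + 511) / 512) * 2    = 3126
    # SECTORS     = 200000 + 4 + 3126             = 203130
    # BLOCKS      = 203130 / 2 + 512              = 102077
    # 102077 / 1024 = 99 (< 512), so mkdosfs is left to pick the FAT size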
diff --git a/meta/classes/image-postinst-intercepts.bbclass b/meta/classes/image-postinst-intercepts.bbclass
deleted file mode 100644
index fc15926384..0000000000
--- a/meta/classes/image-postinst-intercepts.bbclass
+++ /dev/null
@@ -1,29 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Gather existing and candidate postinst intercepts from BBPATH
8POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts"
9POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}"
10
11python find_intercepts() {
12 intercepts = {}
13 search_paths = []
14 paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':')
15 overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + ['']
16 search_paths = [os.path.join(p, op) for p in paths for op in overrides]
17 searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True)
18 files, chksums = [], []
19 for pathname, candidates in searched:
20 if os.path.isfile(pathname):
21 files.append(pathname)
22 chksums.append('%s:True' % pathname)
23 chksums.extend('%s:False' % c for c in candidates[:-1])
24
25 d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums))
26 d.setVar('POSTINST_INTERCEPTS', ' '.join(files))
27}
28find_intercepts[eventmask] += "bb.event.RecipePreFinalise"
29addhandler find_intercepts
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
deleted file mode 100644
index 433172378a..0000000000
--- a/meta/classes/image.bbclass
+++ /dev/null
@@ -1,684 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7IMAGE_CLASSES ??= ""
8
9# rootfs bootstrap install
10# warning - image-container resets this
11ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
12
13# Handle inherits of any of the image classes we need
14IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
15# Only Linux SDKs support populate_sdk_ext; fall back to populate_sdk_base
16# in the non-Linux SDK_OS case (such as mingw32)
17IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
18IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
19IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
20IMGCLASSES += "image_types_wic"
21IMGCLASSES += "rootfs-postcommands"
22IMGCLASSES += "image-postinst-intercepts"
23IMGCLASSES += "overlayfs-etc"
24inherit ${IMGCLASSES}
25
26TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
27TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
28POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
29
30LICENSE ?= "MIT"
31PACKAGES = ""
32DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
33RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
34RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
35PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
36
37INHIBIT_DEFAULT_DEPS = "1"
38
39# IMAGE_FEATURES may contain any available package group
40IMAGE_FEATURES ?= ""
41IMAGE_FEATURES[type] = "list"
42IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging overlayfs-etc"
43
44# Generate companion debugfs?
45IMAGE_GEN_DEBUGFS ?= "0"
46
47# These packages will be installed as additional into debug rootfs
48IMAGE_INSTALL_DEBUGFS ?= ""
49
50# These packages will be removed from a read-only rootfs after all other
51# packages have been installed
52ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
53
54# packages to install from features
55FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
56FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
57FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
58FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
59
60# Define some very basic feature package groups
61FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
62SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}"
63FEATURE_PACKAGES_splash = "${SPLASH}"
64
65IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
66
67def check_image_features(d):
68 valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
69 valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
70 for var in d:
71 if var.startswith("FEATURE_PACKAGES_"):
72 valid_features.append(var[17:])
73 valid_features.sort()
74
75 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
76 for feature in features:
77 if feature not in valid_features:
78 if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
79 raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
80 else:
81 raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
82
83IMAGE_INSTALL ?= ""
84IMAGE_INSTALL[type] = "list"
85export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
86PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
87
88IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"
89
90# Images are generally built explicitly, do not need to be part of world.
91EXCLUDE_FROM_WORLD = "1"
92
93USE_DEVFS ?= "1"
94USE_DEPMOD ?= "1"
95
96PID = "${@os.getpid()}"
97
98PACKAGE_ARCH = "${MACHINE_ARCH}"
99
100LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
101LDCONFIGDEPEND:libc-musl = ""
102
103# This is needed to have depmod data in PKGDATA_DIR,
104# but if you're building a small initramfs image,
105# e.g. to include in your kernel, you probably
106# don't want this dependency, as it causes a dependency loop
107KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata"
108
109do_rootfs[depends] += " \
110 makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
111 virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
112 ${KERNELDEPMODDEPEND} \
113"
114do_rootfs[recrdeptask] += "do_packagedata"
115
116def rootfs_command_variables(d):
117 return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
118 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
119
120python () {
121 variables = rootfs_command_variables(d)
122 for var in variables:
123 if d.getVar(var, False):
124 d.setVarFlag(var, 'func', '1')
125}
126
127def rootfs_variables(d):
128 from oe.rootfs import variable_depends
129 variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
130 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
131 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
132 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
133 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
134 variables.extend(rootfs_command_variables(d))
135 variables.extend(variable_depends(d))
136 return " ".join(variables)
137
138do_rootfs[vardeps] += "${@rootfs_variables(d)}"
139
140# This is needed to have the kernel image in DEPLOY_DIR.
141# This follows many common use cases and user expectations.
142# But if you are building an image which doesn't need the kernel image at all,
143# you can unset this variable manually.
144KERNEL_DEPLOY_DEPEND ?= "virtual/kernel:do_deploy"
145do_build[depends] += "${KERNEL_DEPLOY_DEPEND}"
146
147
148python () {
149 def extraimage_getdepends(task):
150 deps = ""
151 for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
152 if ":" in dep:
153 deps += " %s " % (dep)
154 else:
155 deps += " %s:%s" % (dep, task)
156 return deps
157
158 d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
159
160 deps = " " + imagetypes_getdepends(d)
161 d.appendVarFlag('do_rootfs', 'depends', deps)
162
163 # Process IMAGE_FEATURES; we must do this before runtime_mapping_rename
164 # Check for replaced image features
165 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
166 remain_features = features.copy()
167 for feature in features:
168 replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
169 remain_features -= replaces
170
171 # Check for conflicting image features
172 for feature in remain_features:
173 conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
174 temp = conflicts & remain_features
175 if temp:
176 bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))
177
178 d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))
179
180 check_image_features(d)
181}
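A sketch of the per-feature variables this code consumes; the feature names below are illustrative and would normally be set by an image class:

    # If both features are requested, 'ssh-server-dropbear' is silently dropped:
    IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
    # Requesting both of these together makes the anonymous python above call bb.fatal:
    IMAGE_FEATURES_CONFLICTS_read-only-rootfs = "package-management"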
182
183IMAGE_POSTPROCESS_COMMAND ?= ""
184
185# some default locales
186IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
187
188LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
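As a worked example of the expansion above (values illustrative):

    IMAGE_LINGUAS = "en-us pt-br"
    # LINGUAS_INSTALL then expands to:
    #     locale-base-en-us locale-base-pt-br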
189
190# by default, create a locale archive
191IMAGE_LOCALES_ARCHIVE ?= '1'
192
193# Prefer image, but use the fallback files for lookups if the image ones
194# aren't yet available.
195PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
196
197PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
198
199PACKAGE_EXCLUDE ??= ""
200PACKAGE_EXCLUDE[type] = "list"
201
202fakeroot python do_rootfs () {
203 from oe.rootfs import create_rootfs
204 from oe.manifest import create_manifest
205 import logging
206
207 logger = d.getVar('BB_TASK_LOGGER', False)
208 if logger:
209 logcatcher = bb.utils.LogCatcher()
210 logger.addHandler(logcatcher)
211 else:
212 logcatcher = None
213
214 # NOTE: if you add, remove or significantly refactor the stages of this
215 # process then you should recalculate the weightings here. This is quite
216 # easy to do - just change the MultiStageProgressReporter line temporarily
217 # to pass debug=True as the last parameter and you'll get a printout of
218 # the weightings as well as a map to the lines where next_stage() was
219 # called. Of course this isn't critical, but it helps to keep the progress
220 # reporting accurate.
221 stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1]
222 progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights)
223 progress_reporter.next_stage()
224
225 # Handle package exclusions
226 excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
227 inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
228 inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()
229
230 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
231 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
232
233 for pkg in excl_pkgs:
234 if pkg in inst_pkgs:
235 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
236 inst_pkgs.remove(pkg)
237
238 if pkg in inst_attempt_pkgs:
239 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
240 inst_attempt_pkgs.remove(pkg)
241
242 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
243 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
244
245 # Ensure we handle package name remapping
246 # We have to delay the runtime_mapping_rename until just before rootfs runs
247 # otherwise, the multilib renaming could step in and squash any fixups that
248 # may have occurred.
249 pn = d.getVar('PN')
250 runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
251 runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
252 runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
253
254 # Generate the initial manifest
255 create_manifest(d)
256
257 progress_reporter.next_stage()
258
259 # generate rootfs
260 d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
261 create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)
262
263 progress_reporter.finish()
264}
265do_rootfs[dirs] = "${TOPDIR}"
266do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}"
267do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
268addtask rootfs after do_prepare_recipe_sysroot
269
270fakeroot python do_image () {
271 from oe.utils import execute_pre_post_process
272
273 d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
274 pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")
275
276 execute_pre_post_process(d, pre_process_cmds)
277}
278do_image[dirs] = "${TOPDIR}"
279addtask do_image after do_rootfs
280
281fakeroot python do_image_complete () {
282 from oe.utils import execute_pre_post_process
283
284 post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")
285
286 execute_pre_post_process(d, post_process_cmds)
287}
288do_image_complete[dirs] = "${TOPDIR}"
289SSTATETASKS += "do_image_complete"
290SSTATE_SKIP_CREATION:task-image-complete = '1'
291do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
292do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
293do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
294addtask do_image_complete after do_image before do_build
295python do_image_complete_setscene () {
296 sstate_setscene(d)
297}
298addtask do_image_complete_setscene
299
300# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
301#
302# IMAGE_QA_COMMANDS += " \
303# image_check_everything_ok \
304# "
305# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
306# construction has completed in order to validate the resulting image.
307#
308# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
309# directory which, if QA passes, will be the basis for the images.
310fakeroot python do_image_qa () {
311 from oe.utils import ImageQAFailed
312
313 qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
314 qamsg = ""
315
316 for cmd in qa_cmds:
317 try:
318 bb.build.exec_func(cmd, d)
319 except oe.utils.ImageQAFailed as e:
320 qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
321 except Exception as e:
322 qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
323
324 if qamsg:
325 imgname = d.getVar('IMAGE_NAME')
326 bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
327}
328addtask do_image_qa after do_rootfs before do_image
329
330SSTATETASKS += "do_image_qa"
331SSTATE_SKIP_CREATION:task-image-qa = '1'
332do_image_qa[sstate-inputdirs] = ""
333do_image_qa[sstate-outputdirs] = ""
334python do_image_qa_setscene () {
335 sstate_setscene(d)
336}
337addtask do_image_qa_setscene
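A minimal sketch of a QA function that plugs into this mechanism; the check and the file it looks for are hypothetical, and oe.utils.ImageQAFailed takes the description first and the check name second:

    python image_check_everything_ok () {
        # Hypothetical check: require /etc/hostname in the finished rootfs
        if not os.path.exists(d.expand('${IMAGE_ROOTFS}/etc/hostname')):
            # do_image_qa above reports e.name and e.description for this exception
            raise oe.utils.ImageQAFailed("/etc/hostname is missing", "image_check_everything_ok")
    }
    IMAGE_QA_COMMANDS += "image_check_everything_ok"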
338
339def setup_debugfs_variables(d):
340 d.appendVar('IMAGE_ROOTFS', '-dbg')
341 if d.getVar('IMAGE_LINK_NAME'):
342 d.appendVar('IMAGE_LINK_NAME', '-dbg')
343 d.appendVar('IMAGE_NAME','-dbg')
344 d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
345 debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
346 if debugfs_image_fstypes:
347 d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
348
349python setup_debugfs () {
350 setup_debugfs_variables(d)
351}
352
353python () {
354 vardeps = set()
355 # We allow CONVERSIONTYPES to have duplicates. That avoids breaking
356 # derived distros when OE-core or some other layer independently adds
357 # the same type. There is still only one command for each type, but
358 # presumably the commands will do the same when the type is the same,
359 # even when added in different places.
360 #
361 # Without de-duplication, gen_conversion_cmds() below
362 # would create the same compression command multiple times.
363 ctypes = set(d.getVar('CONVERSIONTYPES').split())
364 old_overrides = d.getVar('OVERRIDES', False)
365
366 def _image_base_type(type):
367 basetype = type
368 for ctype in ctypes:
369 if type.endswith("." + ctype):
370 basetype = type[:-len("." + ctype)]
371 break
372
373 if basetype != type:
374 # New base type itself might be generated by a conversion command.
375 basetype = _image_base_type(basetype)
376
377 return basetype
378
379 basetypes = {}
380 alltypes = d.getVar('IMAGE_FSTYPES').split()
381 typedeps = {}
382
383 if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
384 debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
385 for t in debugfs_fstypes:
386 alltypes.append("debugfs_" + t)
387
388 def _add_type(t):
389 baset = _image_base_type(t)
390 input_t = t
391 if baset not in basetypes:
392 basetypes[baset]= []
393 if t not in basetypes[baset]:
394 basetypes[baset].append(t)
395 debug = ""
396 if t.startswith("debugfs_"):
397 t = t[8:]
398 debug = "debugfs_"
399 deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split()
400 vardeps.add('IMAGE_TYPEDEP:' + t)
401 if baset not in typedeps:
402 typedeps[baset] = set()
403 deps = [debug + dep for dep in deps]
404 for dep in deps:
405 if dep not in alltypes:
406 alltypes.append(dep)
407 _add_type(dep)
408 basedep = _image_base_type(dep)
409 typedeps[baset].add(basedep)
410
411 if baset != input_t:
412 _add_type(baset)
413
414 for t in alltypes[:]:
415 _add_type(t)
416
417 d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
418
419 maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
420 maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]
421
422 for t in basetypes:
423 vardeps = set()
424 cmds = []
425 subimages = []
426 realt = t
427
428 if t in maskedtypes:
429 continue
430
431 localdata = bb.data.createCopy(d)
432 debug = ""
433 if t.startswith("debugfs_"):
434 setup_debugfs_variables(localdata)
435 debug = "setup_debugfs "
436 realt = t[8:]
437 localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
438 localdata.setVar('type', realt)
439 # Delete DATETIME so we don't expand any references to it now
440 # This means the task's hash can be stable rather than having hardcoded
441 # date/time values. It will get expanded at execution time.
442 # Similarly TMPDIR since otherwise we see QA stamp comparison problems
443 # Expand PV now, else later expansion can trigger get_srcrev, which can fail due to these variables being unset
444 localdata.setVar('PV', d.getVar('PV'))
445 localdata.delVar('DATETIME')
446 localdata.delVar('DATE')
447 localdata.delVar('TMPDIR')
448 localdata.delVar('IMAGE_VERSION_SUFFIX')
449 vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
450 for dep in vardepsexclude:
451 localdata.delVar(dep)
452
453 image_cmd = localdata.getVar("IMAGE_CMD")
454 vardeps.add('IMAGE_CMD:' + realt)
455 if image_cmd:
456 cmds.append("\t" + image_cmd)
457 else:
458 bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
459 cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
460
461 # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx,
462 # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function.
463 d.delVarFlag('IMAGE_CMD:' + realt, 'func')
464
465 rm_tmp_images = set()
466 def gen_conversion_cmds(bt):
467 for ctype in sorted(ctypes):
468 if bt.endswith("." + ctype):
469 type = bt[0:-len(ctype) - 1]
470 if type.startswith("debugfs_"):
471 type = type[8:]
472 # Create input image first.
473 gen_conversion_cmds(type)
474 localdata.setVar('type', type)
475 cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype)
476 if cmd not in cmds:
477 cmds.append(cmd)
478 vardeps.add('CONVERSION_CMD:' + ctype)
479 subimage = type + "." + ctype
480 if subimage not in subimages:
481 subimages.append(subimage)
482 if type not in alltypes:
483 rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
484
485 for bt in basetypes[t]:
486 gen_conversion_cmds(bt)
487
488 localdata.setVar('type', realt)
489 if t not in alltypes:
490 rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
491 else:
492 subimages.append(realt)
493
494 # Clean up after applying all conversion commands. Some of them might
495 # use the same input, therefore we cannot delete sooner without applying
496 # some complex dependency analysis.
497 for image in sorted(rm_tmp_images):
498 cmds.append("\trm " + image)
499
500 after = 'do_image'
501 for dep in typedeps[t]:
502 after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
503
504 task = "do_image_%s" % t.replace("-", "_").replace(".", "_")
505
506 d.setVar(task, '\n'.join(cmds))
507 d.setVarFlag(task, 'func', '1')
508 d.setVarFlag(task, 'fakeroot', '1')
509
510 d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
511 d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
512 d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
513 d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
514 d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))
515
516 bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
517 bb.build.addtask(task, 'do_image_complete', after, d)
518}
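To make the machinery above concrete, consider an illustrative configuration:

    IMAGE_FSTYPES = "ext4 ext4.gz.sha256sum"

Both entries share the base type "ext4", so a single fakeroot task do_image_ext4 is generated. Its body runs IMAGE_CMD:ext4, then CONVERSION_CMD:gz and CONVERSION_CMD:sha256sum in dependency order, and finally removes the intermediate .ext4.gz file, since that type was not itself requested in IMAGE_FSTYPES.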
519
520#
521# Compute the rootfs size
522#
523def get_rootfs_size(d):
524 import subprocess, oe.utils
525
526 rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
527 overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
528 rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
529 rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
530 rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
531 image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
532 initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
533 initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
534
535 size_kb = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS")) / 1024
536
537 base_size = size_kb * overhead_factor
538 bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
539 base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
540 bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
541
542 base_size = base_size2
543 if base_size != int(base_size):
544 base_size = int(base_size + 1)
545 else:
546 base_size = int(base_size)
547 bb.debug(1, '%f = int(%f)' % (base_size, base_size2))
548
549 base_size_saved = base_size
550 base_size += rootfs_alignment - 1
551 base_size -= base_size % rootfs_alignment
552 bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved))
553
554 # Do not check the image size of the debugfs image. It is not supposed
555 # to be deployed, etc., so it doesn't make sense to limit the size
556 # of the debugfs image.
557 if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
558 bb.debug(1, 'returning debugfs size %d' % (base_size))
559 return base_size
560
561 # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
562 if rootfs_maxsize:
563 rootfs_maxsize_int = int(rootfs_maxsize)
564 if base_size > rootfs_maxsize_int:
565 bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
566 (base_size, rootfs_maxsize_int))
567
568 # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
569 if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
570 initramfs_maxsize_int = int(initramfs_maxsize)
571 if base_size > initramfs_maxsize_int:
572 bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
573 (base_size, initramfs_maxsize_int))
574 bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
575 bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
576
577 bb.debug(1, 'returning %d' % (base_size))
578 return base_size
579
580python set_image_size () {
581 rootfs_size = get_rootfs_size(d)
582 d.setVar('ROOTFS_SIZE', str(rootfs_size))
583 d.setVarFlag('ROOTFS_SIZE', 'export', '1')
584}
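A worked example of the computation in get_rootfs_size(), with hypothetical numbers:

    # directory_size(IMAGE_ROOTFS) / 1024                      ->  100000 (KiB)
    # * IMAGE_OVERHEAD_FACTOR (1.3)                            ->  130000.0
    # max(130000.0, IMAGE_ROOTFS_SIZE = 65536) + IMAGE_ROOTFS_EXTRA_SPACE = 0
    #                                                          ->  130000.0
    # rounded up to an integer                                 ->  130000
    # aligned to IMAGE_ROOTFS_ALIGNMENT = 1 (KiB)              ->  ROOTFS_SIZE = 130000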
585
586#
587# Create symlinks to the newly created image
588#
589python create_symlinks() {
590
591 deploy_dir = d.getVar('IMGDEPLOYDIR')
592 img_name = d.getVar('IMAGE_NAME')
593 link_name = d.getVar('IMAGE_LINK_NAME')
594 manifest_name = d.getVar('IMAGE_MANIFEST')
595 taskname = d.getVar("BB_CURRENTTASK")
596 subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
597 imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")
598
599 if not link_name:
600 return
601 for type in subimages:
602 dst = os.path.join(deploy_dir, link_name + "." + type)
603 src = img_name + imgsuffix + type
604 if os.path.exists(os.path.join(deploy_dir, src)):
605 bb.note("Creating symlink: %s -> %s" % (dst, src))
606 if os.path.islink(dst):
607 os.remove(dst)
608 os.symlink(src, dst)
609 else:
610 bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
611}
612
613MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
614MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
615MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
616
617do_fetch[noexec] = "1"
618do_unpack[noexec] = "1"
619do_patch[noexec] = "1"
620do_configure[noexec] = "1"
621do_compile[noexec] = "1"
622do_install[noexec] = "1"
623deltask do_populate_lic
624deltask do_populate_sysroot
625do_package[noexec] = "1"
626deltask do_package_qa
627deltask do_packagedata
628deltask do_package_write_ipk
629deltask do_package_write_deb
630deltask do_package_write_rpm
631
632# Prepare the root links to point to the /usr counterparts.
633create_merged_usr_symlinks() {
634 root="$1"
635 install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
636 ln -rs $root${base_bindir} $root/bin
637 ln -rs $root${base_sbindir} $root/sbin
638 ln -rs $root${base_libdir} $root/${baselib}
639
640 if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
641 install -d $root${nonarch_base_libdir}
642 ln -rs $root${nonarch_base_libdir} $root/lib
643 fi
644
645 # create base links for multilibs
646 multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
647 for d in $multi_libdirs; do
648 install -d $root${exec_prefix}/$d
649 ln -rs $root${exec_prefix}/$d $root/$d
650 done
651}
652
653create_merged_usr_symlinks_rootfs() {
654 create_merged_usr_symlinks ${IMAGE_ROOTFS}
655}
656
657create_merged_usr_symlinks_sdk() {
658 create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
659}
660
661ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
662POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
663
664reproducible_final_image_task () {
665 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
666 REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
667 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
668 REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
669 fi
670 fi
671 # Set mtime of all files to a reproducible value
672 bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
673 find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
674}
675
676systemd_preset_all () {
677 if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
678 systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
679 fi
680}
681
682IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
683
684CVE_PRODUCT = ""
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
deleted file mode 100644
index a731e585b2..0000000000
--- a/meta/classes/image_types.bbclass
+++ /dev/null
@@ -1,355 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The default alignment of the size of the rootfs is set to 1KiB. In case
8# you're using the SD card emulation of a QEMU system simulator you may
9# set this value to 2048 (2MiB alignment).
10IMAGE_ROOTFS_ALIGNMENT ?= "1"
11
12def imagetypes_getdepends(d):
13 def adddep(depstr, deps):
14 for d in (depstr or "").split():
15 # Add task dependency if not already present
16 if ":" not in d:
17 d += ":do_populate_sysroot"
18 deps.add(d)
19
20 # Take a type in the form of foo.bar.car and split it into the items
21 # needed for the image deps "foo", and the conversion deps ["bar", "car"]
22 def split_types(typestring):
23 types = typestring.split(".")
24 return types[0], types[1:]
25
26 fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
27 fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())
28
29 deprecated = set()
30 deps = set()
31 for typestring in fstypes:
32 basetype, resttypes = split_types(typestring)
33
34 var = "IMAGE_DEPENDS_%s" % basetype
35 if d.getVar(var) is not None:
36 deprecated.add(var)
37
38 for typedepends in (d.getVar("IMAGE_TYPEDEP:%s" % basetype) or "").split():
39 base, rest = split_types(typedepends)
40 resttypes += rest
41
42 var = "IMAGE_DEPENDS_%s" % base
43 if d.getVar(var) is not None:
44 deprecated.add(var)
45
46 for ctype in resttypes:
47 adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
48 adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)
49
50 if deprecated:
51 bb.fatal('Deprecated variable(s) found: "%s". '
52 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated))
53
54 # Sort the set so that ordering is consistent
55 return " ".join(sorted(deps))
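The replacement idiom referenced in the bb.fatal message above looks like this (recipe and task names are illustrative):

    do_image_wic[depends] += "my-bootloader:do_deploy"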
56
57XZ_COMPRESSION_LEVEL ?= "-9"
58XZ_INTEGRITY_CHECK ?= "crc32"
59
60ZIP_COMPRESSION_LEVEL ?= "-9"
61
62ZSTD_COMPRESSION_LEVEL ?= "-3"
63
64JFFS2_SUM_EXTRA_ARGS ?= ""
65IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
66
67IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
68
69oe_mkext234fs () {
70 fstype=$1
71 extra_imagecmd=""
72
73 if [ $# -gt 1 ]; then
74 shift
75 extra_imagecmd=$@
76 fi
77
78 # If generating an empty image, the size of the sparse block should be large
79 # enough to allocate an ext4 filesystem using 4096 bytes per inode. This is
80 # about 60K, so dd needs a minimum count of 60, with bs=1024 (bytes per IO).
81 eval local COUNT=\"0\"
82 eval local MIN_COUNT=\"60\"
83 if [ $ROOTFS_SIZE -lt $MIN_COUNT ]; then
84 eval COUNT=\"$MIN_COUNT\"
85 fi
86 # Create a sparse image block
87 bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
88 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
89 bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
90 bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
91 bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
92 mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
93 # Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
94 fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
95}
96
97IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
98IMAGE_CMD:ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
99IMAGE_CMD:ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
100
101MIN_BTRFS_SIZE ?= "16384"
102IMAGE_CMD:btrfs () {
103 size=${ROOTFS_SIZE}
104 if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
105 size=${MIN_BTRFS_SIZE}
106 bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
107 fi
108 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs seek=${size} count=0 bs=1024
109 mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
110}
111
112IMAGE_CMD:squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
113IMAGE_CMD:squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
114IMAGE_CMD:squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
115IMAGE_CMD:squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
116IMAGE_CMD:squashfs-zst = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-zst ${EXTRA_IMAGECMD} -noappend -comp zstd"
117
118IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs ${IMAGE_ROOTFS}"
119IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4 ${IMAGE_ROOTFS}"
120IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4hc ${IMAGE_ROOTFS}"
121
122
123IMAGE_CMD_TAR ?= "tar"
124# ignore return code 1 "file changed as we read it" as other tasks (e.g. do_image_wic) may be hardlinking the rootfs
125IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
126
127do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
128IMAGE_CMD:cpio () {
129 (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
130 # We only need the /init symlink if we're building the real
131 # image. The -dbg image doesn't need it! By being clever
132 # about this we also avoid 'touch' below failing, as it
133 # might be trying to touch /sbin/init on the host since both
134 # the normal and the -dbg image share the same WORKDIR
135 if [ "${IMAGE_BUILDING_DEBUGFS}" != "true" ]; then
136 if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
137 if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
138 ln -sf /sbin/init ${WORKDIR}/cpio_append/init
139 else
140 touch ${WORKDIR}/cpio_append/init
141 fi
142 (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
143 fi
144 fi
145}
146
147UBI_VOLNAME ?= "${MACHINE}-rootfs"
148UBI_VOLTYPE ?= "dynamic"
149UBI_IMGTYPE ?= "ubifs"
150
151write_ubi_config() {
152 if [ -z "$1" ]; then
153 local vname=""
154 else
155 local vname="_$1"
156 fi
157
158 cat <<EOF > ubinize${vname}-${IMAGE_NAME}.cfg
159[ubifs]
160mode=ubi
161image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE}
162vol_id=0
163vol_type=${UBI_VOLTYPE}
164vol_name=${UBI_VOLNAME}
165vol_flags=autoresize
166EOF
167}
168
169multiubi_mkfs() {
170 local mkubifs_args="$1"
171 local ubinize_args="$2"
172
173 # Print an error message if the arguments required for ubi/ubifs image creation are not set.
174 if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
175 bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
176 fi
177
    # Note: the "local vname" set inside write_ubi_config is not visible in
    # this function's scope, so derive the volume-name suffix here as well.
    if [ -z "$3" ]; then
        local vname=""
    else
        local vname="_$3"
    fi
178 write_ubi_config "$3"
179
180 if [ -n "$vname" ]; then
181 mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
182 fi
183 ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
184
185 # Cleanup cfg file
186 mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/
187
188 # Create own symlinks for 'named' volumes
189 if [ -n "$vname" ]; then
190 cd ${IMGDEPLOYDIR}
191 if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
192 ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
193 ${IMAGE_LINK_NAME}${vname}.ubifs
194 fi
195 if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ]; then
196 ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi \
197 ${IMAGE_LINK_NAME}${vname}.ubi
198 fi
199 cd -
200 fi
201}
202
203IMAGE_CMD:multiubi () {
204 # Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
205 for name in ${MULTIUBI_BUILD}; do
206 eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
207 eval local ubinize_args=\"\$UBINIZE_ARGS_${name}\"
208
209 multiubi_mkfs "${mkubifs_args}" "${ubinize_args}" "${name}"
210 done
211}
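A sketch of the per-volume configuration this loop expects; the volume names and flash geometry below are hypothetical:

    MULTIUBI_BUILD = "boot data"
    MKUBIFS_ARGS_boot = "-m 2048 -e 126976 -c 1024"
    UBINIZE_ARGS_boot = "-m 2048 -p 128KiB -s 512"
    MKUBIFS_ARGS_data = "-m 2048 -e 126976 -c 4096"
    UBINIZE_ARGS_data = "-m 2048 -p 128KiB -s 512"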
212
213IMAGE_CMD:ubi () {
214 multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
215}
216IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}"
217
218IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
219
220MIN_F2FS_SIZE ?= "524288"
221IMAGE_CMD:f2fs () {
222 # We need to add additional smarts here for devices smaller than 1.5G.
223 # We need to scale appropriately between 40M -> 1.5G as the "overprovision
224 # ratio" goes down as the device gets bigger (70% -> 4.5%). Below about
225 # 500M the standard IMAGE_OVERHEAD_FACTOR does not work, so add additional
226 # space here when under 500M.
227 size=${ROOTFS_SIZE}
228 if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then
229 size=${MIN_F2FS_SIZE}
230 bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
231 fi
232 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs seek=${size} count=0 bs=1024
233 mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
234 sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
235}
236
237EXTRA_IMAGECMD = ""
238
239inherit siteinfo kernel-arch image-artifact-names
240
241JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
242JFFS2_ERASEBLOCK ?= "0x40000"
243EXTRA_IMAGECMD:jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
244
245# Change these if you want the default mkfs behavior (i.e. to create the minimal number of inodes)
246EXTRA_IMAGECMD:ext2 ?= "-i 4096"
247EXTRA_IMAGECMD:ext3 ?= "-i 4096"
248EXTRA_IMAGECMD:ext4 ?= "-i 4096"
249EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink"
250EXTRA_IMAGECMD:f2fs ?= ""
251
252do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
253do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
254do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
255do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot"
256do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot"
257do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot"
258do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot"
259do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
260do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
261do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
262do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
263do_image_squashfs_zst[depends] += "squashfs-tools-native:do_populate_sysroot"
264do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
265do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
266do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
267do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
268do_image_erofs[depends] += "erofs-utils-native:do_populate_sysroot"
269do_image_erofs_lz4[depends] += "erofs-utils-native:do_populate_sysroot"
270do_image_erofs_lz4hc[depends] += "erofs-utils-native:do_populate_sysroot"
271
272# This variable lists the values that are suitable for IMAGE_FSTYPES
273IMAGE_TYPES = " \
274 jffs2 jffs2.sum \
275 cramfs \
276 ext2 ext2.gz ext2.bz2 ext2.lzma \
277 ext3 ext3.gz \
278 ext4 ext4.gz \
279 btrfs \
280 squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \
281 ubi ubifs multiubi \
282 tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
283 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 cpio.zst \
284 wic wic.gz wic.bz2 wic.lzma wic.zst \
285 container \
286 f2fs \
287 erofs erofs-lz4 erofs-lz4hc \
288"
289# These image types are x86 specific as they need syslinux
290IMAGE_TYPES:append:x86 = " hddimg iso"
291IMAGE_TYPES:append:x86-64 = " hddimg iso"
292
293# Compression is a special case of conversion. The old variable
294# names are still supported for backward-compatibility. When defining
295# new compression or conversion commands, use CONVERSIONTYPES and
296# CONVERSION_CMD/DEPENDS.
297COMPRESSIONTYPES ?= ""
298
299CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
300CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
301CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
302CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
303CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
304CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
305CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
306CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
307CONVERSION_CMD:zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
308CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
309CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
310CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
311CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
312CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
313CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
314CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
315CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
316CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
317CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
318CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
319CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
320CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
321CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
322CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
323CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
324CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
325CONVERSION_DEPENDS_lzma = "xz-native"
326CONVERSION_DEPENDS_gz = "pigz-native"
327CONVERSION_DEPENDS_bz2 = "pbzip2-native"
328CONVERSION_DEPENDS_xz = "xz-native"
329CONVERSION_DEPENDS_lz4 = "lz4-native"
330CONVERSION_DEPENDS_lzo = "lzop-native"
331CONVERSION_DEPENDS_zip = "zip-native"
332CONVERSION_DEPENDS_zst = "zstd-native"
333CONVERSION_DEPENDS_sum = "mtd-utils-native"
334CONVERSION_DEPENDS_bmap = "bmap-tools-native"
335CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
336CONVERSION_DEPENDS_vmdk = "qemu-system-native"
337CONVERSION_DEPENDS_vdi = "qemu-system-native"
338CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
339CONVERSION_DEPENDS_base64 = "coreutils-native"
340CONVERSION_DEPENDS_vhdx = "qemu-system-native"
341CONVERSION_DEPENDS_vhd = "qemu-system-native"
342CONVERSION_DEPENDS_zsync = "zsync-curl-native"
343CONVERSION_DEPENDS_gzsync = "zsync-curl-native"
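Following the pattern above, a layer could register its own conversion type, e.g. a hypothetical Brotli conversion (the brotli-native recipe is assumed to be provided by that layer):

    CONVERSIONTYPES:append = " br"
    CONVERSION_CMD:br = "brotli -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
    CONVERSION_DEPENDS_br = "brotli-native"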
344
345RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
346RUNNABLE_MACHINE_PATTERNS ?= "qemu"
347
348DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
349
350# The IMAGE_TYPES_MASKED variable will be used to mask out from IMAGE_FSTYPES
351# the images that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
352IMAGE_TYPES_MASKED ?= ""
353
354# bmap requires python3 to be in the PATH
355EXTRANATIVEPATH += "${@'python3-native' if '.bmap' in d.getVar('IMAGE_FSTYPES') else ''}"
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
deleted file mode 100644
index c339b9bdfb..0000000000
--- a/meta/classes/image_types_wic.bbclass
+++ /dev/null
@@ -1,190 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The WICVARS variable is used to define the list of bitbake variables used in the wic code;
8# variables from this list are written to the <image>.env file
9WICVARS ?= "\
10 APPEND \
11 ASSUME_PROVIDED \
12 BBLAYERS \
13 DEPLOY_DIR_IMAGE \
14 FAKEROOTCMD \
15 HOSTTOOLS_DIR \
16 IMAGE_BASENAME \
17 IMAGE_BOOT_FILES \
18 IMAGE_EFI_BOOT_FILES \
19 IMAGE_LINK_NAME \
20 IMAGE_ROOTFS \
21 IMGDEPLOYDIR \
22 INITRAMFS_FSTYPES \
23 INITRAMFS_IMAGE \
24 INITRAMFS_IMAGE_BUNDLE \
25 INITRAMFS_LINK_NAME \
26 INITRD \
27 INITRD_LIVE \
28 ISODIR \
29 KERNEL_IMAGETYPE \
30 MACHINE \
31 PSEUDO_IGNORE_PATHS \
32 RECIPE_SYSROOT_NATIVE \
33 ROOTFS_SIZE \
34 STAGING_DATADIR \
35 STAGING_DIR \
36 STAGING_DIR_HOST \
37 STAGING_LIBDIR \
38 TARGET_SYS \
39"
40
41inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
42
43WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
44WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
45WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
46WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}"
47
48def wks_search(files, search_path):
49 for f in files:
50 if os.path.isabs(f):
51 if os.path.exists(f):
52 return f
53 else:
54 searched = bb.utils.which(search_path, f)
55 if searched:
56 return searched
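Given that search order, a BSP usually only needs to set the file name and drop the file into a wic/ directory somewhere on BBPATH (illustrative):

    WKS_FILE = "my-board.wks"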
57
58WIC_CREATE_EXTRA_ARGS ?= ""
59
60IMAGE_CMD:wic () {
61 out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
62 build_wic="${WORKDIR}/build-wic"
63 tmp_wic="${WORKDIR}/tmp-wic"
64 wks="${WKS_FULL_PATH}"
65 if [ -e "$tmp_wic" ]; then
66 # Ensure we don't have any junk leftover from a previously interrupted
67 # do_image_wic execution
68 rm -rf "$tmp_wic"
69 fi
70 if [ -z "$wks" ]; then
71 bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
72 fi
73 BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
74 mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
75}
76IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
77do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
78
79PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
80
81# Rebuild when the wks file or vars in WICVARS change
82USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
83WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
84do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
85do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
86
87# We ensure all artifacts are deployed (e.g. virtual/bootloader)
88do_image_wic[recrdeptask] += "do_deploy"
89do_image_wic[deptask] += "do_image_complete"
90
91WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
92WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
93# Unified kernel images need objcopy
94WKS_FILE_DEPENDS_DEFAULT += "virtual/${MLPREFIX}${TARGET_PREFIX}binutils"
95WKS_FILE_DEPENDS_BOOTLOADERS = ""
96WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
97WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
98WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi"
99
100WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
101
102DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
103
104python do_write_wks_template () {
105 """Write out expanded template contents to WKS_FULL_PATH."""
106 import re
107
108 template_body = d.getVar('_WKS_TEMPLATE')
109
110 # Remove any remnant variable references left behind by the expansion
111 # due to undefined variables
112 expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
113 while True:
114 new_body = re.sub(expand_var_regexp, '', template_body)
115 if new_body == template_body:
116 break
117 else:
118 template_body = new_body
119
120 wks_file = d.getVar('WKS_FULL_PATH')
121 with open(wks_file, 'w') as f:
122 f.write(template_body)
124 # Copy the finalized wks file to the deploy directory for later use
125 depdir = d.getVar('IMGDEPLOYDIR')
126 basename = d.getVar('IMAGE_BASENAME')
127 bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
128}
129
130do_flush_pseudodb() {
131 ${FAKEROOTENV} ${FAKEROOTCMD} -S
132}
133
134python () {
135 if d.getVar('USING_WIC'):
136 wks_file_u = d.getVar('WKS_FULL_PATH', False)
137 wks_file = d.expand(wks_file_u)
138 base, ext = os.path.splitext(wks_file)
139 if ext == '.in' and os.path.exists(wks_file):
140 wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base))
141 d.setVar('WKS_FULL_PATH', wks_out_file)
142 d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
143 d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
144
145 # We need to re-parse each time the file changes, and bitbake
146 # needs to be told about that explicitly.
147 bb.parse.mark_dependency(d, wks_file)
148
149 try:
150 with open(wks_file, 'r') as f:
151 body = f.read()
152 except (IOError, OSError):
153 pass
154 else:
155 # Previously, I used expandWithRefs to get the dependency list
156 # and add it to WICVARS, but there's no point re-parsing the
157 # file in process_wks_template as well, so just put it in
158 # a variable and let the metadata deal with the deps.
159 d.setVar('_WKS_TEMPLATE', body)
160 bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
161 bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
162}
163
164#
165# Write environment variables used by wic
166# to tmp/sysroots/<machine>/imgdata/<image>.env
167#
168python do_rootfs_wicenv () {
169 wicvars = d.getVar('WICVARS')
170 if not wicvars:
171 return
172
173 stdir = d.getVar('STAGING_DIR')
174 outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata')
175 bb.utils.mkdirhier(outdir)
176 basename = d.getVar('IMAGE_BASENAME')
177 with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
178 for var in wicvars.split():
179 value = d.getVar(var)
180 if value:
181 envf.write('%s="%s"\n' % (var, value.strip()))
183 # Copy .env file to deploy directory for later use with standalone wic
184 depdir = d.getVar('IMGDEPLOYDIR')
185 bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
186}
187addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa
188addtask do_rootfs_wicenv after do_image before do_image_wic
189do_rootfs_wicenv[vardeps] += "${WICVARS}"
190do_rootfs_wicenv[prefuncs] = 'set_image_size'
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
deleted file mode 100644
index 46ea41e271..0000000000
--- a/meta/classes/insane.bbclass
+++ /dev/null
@@ -1,1453 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# BB Class inspired by ebuild.sh
8#
9# This class will test files after installation for certain
10# security issues and other kinds of issues.
11#
12# Checks we do:
13# -Check the ownership and permissions
14# -Check the RUNTIME path for the $TMPDIR
15# -Check if .la files wrongly point to workdir
16# -Check if .pc files wrongly point to workdir
17# -Check if packages contain .debug directories or .so files
18# where they should be in -dev or -dbg
19# -Check if config.log contains traces to broken autoconf tests
20# -Check invalid characters (non-utf8) on some package metadata
21# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
22# into exec_prefix
23# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
24# files under exec_prefix
25# -Check if the package name is upper case
26
27# Select whether a given type of error is a warning or an error; they may
28# have been set by other files.
29WARN_QA ?= " libdir xorg-driver-abi buildpaths \
30 textrel incompatible-license files-invalid \
31 infodir build-deps src-uri-bad symlink-to-sysroot multilib \
32 invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
33 mime mime-xdg unlisted-pkg-lics unhandled-features-check \
34 missing-update-alternatives native-last missing-ptest \
35 license-exists license-no-generic license-syntax license-format \
36 license-incompatible license-file-missing obsolete-license \
37 "
38ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
39 perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
40 split-strip packages-list pkgv-undefined var-undefined \
41 version-going-backwards expanded-d invalid-chars \
42 license-checksum dev-elf file-rdeps configure-unsafe \
43 configure-gettext perllocalpod shebang-size \
44 already-stripped installed-vs-shipped ldflags compile-host-path \
45 install-host-path pn-overrides unknown-configure-option \
46 useless-rpaths rpaths staticdev empty-dirs \
47 "
48# Add usrmerge QA check based on distro feature
49ERROR_QA:append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
50
51FAKEROOT_QA = "host-user-contaminated"
52FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
53enabled tests are listed here, the do_package_qa task will run under fakeroot."
54
55ALL_QA = "${WARN_QA} ${ERROR_QA}"
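Because the lists above use weak default assignment, a distro can promote a check from warning to error (or demote one) without redefining them wholesale; an illustrative tweak:

    ERROR_QA:append = " patch-fuzz"
    WARN_QA:remove = "patch-fuzz"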
56
57UNKNOWN_CONFIGURE_OPT_IGNORE ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
58
59# This is a list of directories that are expected to be empty.
60QA_EMPTY_DIRS ?= " \
61 /dev/pts \
62 /media \
63 /proc \
64 /run \
65 /tmp \
66 ${localstatedir}/run \
67 ${localstatedir}/volatile \
68"
69# It is possible to specify why a directory is expected to be empty by defining
70# QA_EMPTY_DIRS_RECOMMENDATION:<path>, which will then be included in the error
71# message if the directory is not empty. If it is not specified for a directory,
72# then "but it is expected to be empty" will be used.
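For instance (path and wording illustrative):

    QA_EMPTY_DIRS_RECOMMENDATION:/run = "but it is expected to be mounted as a tmpfs at runtime"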
73
74def package_qa_clean_path(path, d, pkg=None):
75 """
76 Remove redundant paths from the path for display. If pkg isn't set then
77 TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
78 """
79 if pkg:
80 path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
81 return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
82
83QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
84def package_qa_check_shebang_size(path, name, d, elf, messages):
85 import stat
86 if os.path.islink(path) or stat.S_ISFIFO(os.stat(path).st_mode) or elf:
87 return
88
89 try:
90 with open(path, 'rb') as f:
91 stanza = f.readline(130)
92 except IOError:
93 return
94
95 if stanza.startswith(b'#!'):
96 # Shebang found; check that it decodes and is not too long
97 try:
98 stanza = stanza.decode("utf-8")
99 except UnicodeDecodeError:
100 #If it is not a text file, it is not a script
101 return
102
103 if len(stanza) > 129:
104 oe.qa.add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
105 return
106
107QAPATHTEST[libexec] = "package_qa_check_libexec"
108def package_qa_check_libexec(path,name, d, elf, messages):
109
110 # Skip the case where the default is explicitly /usr/libexec
111 libexec = d.getVar('libexecdir')
112 if libexec == "/usr/libexec":
113 return True
114
115 if 'libexec' in path.split(os.path.sep):
116 oe.qa.add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
117 return False
118
119 return True
120
121QAPATHTEST[rpaths] = "package_qa_check_rpath"
122def package_qa_check_rpath(file,name, d, elf, messages):
123 """
124 Check for dangerous RPATHs
125 """
126 if not elf:
127 return
128
129 if os.path.islink(file):
130 return
131
132 bad_dirs = [d.getVar('BASE_WORKDIR'), d.getVar('STAGING_DIR_TARGET')]
133
134 phdrs = elf.run_objdump("-p", d)
135
136 import re
137 rpath_re = re.compile(r"\s+RPATH\s+(.*)")
138 for line in phdrs.split("\n"):
139 m = rpath_re.match(line)
140 if m:
141 rpath = m.group(1)
142 for dir in bad_dirs:
143 if dir in rpath:
144 oe.qa.add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
145
146QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
147def package_qa_check_useless_rpaths(file, name, d, elf, messages):
148 """
149 Check for RPATHs that are useless but not dangerous
150 """
151 def rpath_eq(a, b):
152 return os.path.normpath(a) == os.path.normpath(b)
153
154 if not elf:
155 return
156
157 if os.path.islink(file):
158 return
159
160 libdir = d.getVar("libdir")
161 base_libdir = d.getVar("base_libdir")
162
163 phdrs = elf.run_objdump("-p", d)
164
165 import re
166 rpath_re = re.compile(r"\s+RPATH\s+(.*)")
167 for line in phdrs.split("\n"):
168 m = rpath_re.match(line)
169 if m:
170 rpath = m.group(1)
171 if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
172 # The dynamic linker searches both these places anyway. There is no point in
173 # looking there again.
174 oe.qa.add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath))
175
176QAPATHTEST[dev-so] = "package_qa_check_dev"
177def package_qa_check_dev(path, name, d, elf, messages):
178 """
179 Check for ".so" library symlinks in non-dev packages
180 """
181
182 if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
183 oe.qa.add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
184 (name, package_qa_clean_path(path, d, name)))
185
186QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
187def package_qa_check_dev_elf(path, name, d, elf, messages):
188 """
189 Check that -dev doesn't contain real shared libraries. The test has to
190 check that the file is not a link and is an ELF object as some recipes
191 install link-time .so files that are linker scripts.
192 """
193 if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
194 oe.qa.add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
195 (name, package_qa_clean_path(path, d, name)))
196
197QAPATHTEST[staticdev] = "package_qa_check_staticdev"
198def package_qa_check_staticdev(path, name, d, elf, messages):
199 """
200 Check for ".a" library in non-staticdev packages
201 There are a number of exceptions to this rule, -pic packages can contain
202 static libraries, the _nonshared.a belong with their -dev packages and
203 libgcc.a, libgcov.a will be skipped in their packages
204 """
205
206 if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
207 oe.qa.add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
208 (name, package_qa_clean_path(path,d, name)))
209
210QAPATHTEST[mime] = "package_qa_check_mime"
211def package_qa_check_mime(path, name, d, elf, messages):
212 """
213 Check if package installs mime types to /usr/share/mime/packages
214 while not inheriting mime.bbclass
215 """
216
217 if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
218 oe.qa.add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
219 (name, package_qa_clean_path(path,d)))
220
221QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
222def package_qa_check_mime_xdg(path, name, d, elf, messages):
223 """
224 Check if package installs desktop file containing MimeType and requires
225 mime-types.bbclass to create /usr/share/applications/mimeinfo.cache
226 """
227
228 if d.getVar("datadir") + "/applications" in path and path.endswith('.desktop') and not bb.data.inherits_class("mime-xdg", d):
229 mime_type_found = False
230 try:
231 with open(path, 'r') as f:
232 for line in f.read().split('\n'):
233 if 'MimeType' in line:
234 mime_type_found = True
235 break
236 except:
237 # At least libreoffice installs symlinks with absolute paths that are dangling here.
238 # We could implement some magic, but for the few (one) recipes affected it is not worth the effort, so just warn:
239 wstr = "%s cannot open %s - is it a symlink with absolute path?\n" % (name, package_qa_clean_path(path,d))
240 wstr += "Please check if (linked) file contains key 'MimeType'.\n"
241 pkgname = name
242 if name == d.getVar('PN'):
243 pkgname = '${PN}'
244 wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP:%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
245 oe.qa.add_message(messages, "mime-xdg", wstr)
246 if mime_type_found:
247 oe.qa.add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
248 (name, package_qa_clean_path(path,d)))
249
250def package_qa_check_libdir(d):
251 """
252 Check for wrong library installation paths. For instance, catch
253 recipes installing /lib/bar.so when ${base_libdir}="lib32" or
254 installing in /usr/lib64 when ${libdir}="/usr/lib"
255 """
256 import re
257
258 pkgdest = d.getVar('PKGDEST')
259 base_libdir = d.getVar("base_libdir") + os.sep
260 libdir = d.getVar("libdir") + os.sep
261 libexecdir = d.getVar("libexecdir") + os.sep
262 exec_prefix = d.getVar("exec_prefix") + os.sep
263
264 messages = []
265
266 # The re's are purposely fuzzy, as there are some .so.x.y.z files
267 # that don't follow the standard naming convention. It is checked later
268 # that they are actual ELF files
269 lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
270 exec_re = re.compile(r"^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
271
272 for root, dirs, files in os.walk(pkgdest):
273 if root == pkgdest:
274 # Skip subdirectories for any packages with libdir in INSANE_SKIP
275 skippackages = []
276 for package in dirs:
277 if 'libdir' in (d.getVar('INSANE_SKIP:' + package) or "").split():
278 bb.note("Package %s skipping libdir QA test" % (package))
279 skippackages.append(package)
280 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
281 bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
282 skippackages.append(package)
283 for package in skippackages:
284 dirs.remove(package)
285 for file in files:
286 full_path = os.path.join(root, file)
287 rel_path = os.path.relpath(full_path, pkgdest)
288 if os.sep in rel_path:
289 package, rel_path = rel_path.split(os.sep, 1)
290 rel_path = os.sep + rel_path
291 if lib_re.match(rel_path):
292 if base_libdir not in rel_path:
293 # make sure it's an actual ELF file
294 elf = oe.qa.ELFFile(full_path)
295 try:
296 elf.open()
297 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
298 except (oe.qa.NotELFFileError):
299 pass
300 if exec_re.match(rel_path):
301 if libdir not in rel_path and libexecdir not in rel_path:
302 # make sure it's an actual ELF file
303 elf = oe.qa.ELFFile(full_path)
304 try:
305 elf.open()
306 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
307 except (oe.qa.NotELFFileError):
308 pass
309
310 if messages:
311 oe.qa.handle_error("libdir", "\n".join(messages), d)
312
313QAPATHTEST[debug-files] = "package_qa_check_dbg"
314def package_qa_check_dbg(path, name, d, elf, messages):
315 """
316 Check for ".debug" files or directories outside of the dbg package
317 """
318
319 if not "-dbg" in name and not "-ptest" in name:
320 if '.debug' in path.split(os.path.sep):
321 oe.qa.add_message(messages, "debug-files", "non-debug package contains .debug directory: %s path %s" % \
322 (name, package_qa_clean_path(path,d)))
323
324QAPATHTEST[arch] = "package_qa_check_arch"
325def package_qa_check_arch(path,name,d, elf, messages):
326 """
327 Check if archs are compatible
328 """
329 import re, oe.elf
330
331 if not elf:
332 return
333
334 target_os = d.getVar('HOST_OS')
335 target_arch = d.getVar('HOST_ARCH')
336 provides = d.getVar('PROVIDES')
337 bpn = d.getVar('BPN')
338
339 if target_arch == "allarch":
340 pn = d.getVar('PN')
341 oe.qa.add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
342 return
343
344 # FIXME: Cross packages confuse this check, so just skip them
345 for s in ['cross', 'nativesdk', 'cross-canadian']:
346 if bb.data.inherits_class(s, d):
347 return
348
349 # avoid following links to /usr/bin (e.g. on udev builds)
350 # we will check the files pointed to anyway...
351 if os.path.islink(path):
352 return
353
354 # if this throws a KeyError, the machine_dict in oe.elf needs fixing
355 (machine, osabi, abiversion, littleendian, bits) \
356 = oe.elf.machine_dict(d)[target_os][target_arch]
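# For illustration (values as in oe.elf.machine_dict): on a 64-bit x86
# target this yields roughly (62, 0, 0, True, 64), i.e. EM_X86_64,
# little-endian, 64-bit.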
357
358 # Check the architecture and endianness of the binary
359 is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
360 (target_os == "linux-gnux32" or target_os == "linux-muslx32" or \
361 target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
362 is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
363 if not ((machine == elf.machine()) or is_32 or is_bpf):
364 oe.qa.add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
365 (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path, d, name)))
366 elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
367 oe.qa.add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
368 (elf.abiSize(), bits, package_qa_clean_path(path, d, name)))
369 elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
370 oe.qa.add_message(messages, "arch", "Endianness did not match (%d, expected %d) in %s" % \
371 (elf.isLittleEndian(), littleendian, package_qa_clean_path(path,d, name)))
372
373QAPATHTEST[desktop] = "package_qa_check_desktop"
374def package_qa_check_desktop(path, name, d, elf, messages):
375 """
376 Run all desktop files through desktop-file-validate.
377 """
378 if path.endswith(".desktop"):
379 desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE'),'desktop-file-validate')
380 output = os.popen("%s %s" % (desktop_file_validate, path))
381 # This only produces output on errors
382 for l in output:
383 oe.qa.add_message(messages, "desktop", "Desktop file issue: " + l.strip())
384
385QAPATHTEST[textrel] = "package_qa_textrel"
386def package_qa_textrel(path, name, d, elf, messages):
387 """
388 Check if the binary contains relocations in .text
389 """
390
391 if not elf:
392 return
393
394 if os.path.islink(path):
395 return
396
397 phdrs = elf.run_objdump("-p", d)
398 sane = True
399
400 import re
401 textrel_re = re.compile(r"\s+TEXTREL\s+")
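# For illustration, a tainted binary shows a dynamic tag line such as
# "  TEXTREL  0x0" in the "objdump -p" output, which this regex matches.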
402 for line in phdrs.split("\n"):
403 if textrel_re.match(line):
404 sane = False
405 break
406
407 if not sane:
408 path = package_qa_clean_path(path, d, name)
409 oe.qa.add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
410
411QAPATHTEST[ldflags] = "package_qa_hash_style"
412def package_qa_hash_style(path, name, d, elf, messages):
413 """
414 Check if the binary has the right hash style...
415 """
416
417 if not elf:
418 return
419
420 if os.path.islink(path):
421 return
422
423 gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS')
424 if not gnu_hash:
425 gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS')
426 if not gnu_hash:
427 return
428
429 sane = False
430 has_syms = False
431
432 phdrs = elf.run_objdump("-p", d)
433
434 # If this binary has symbols, we expect it to have GNU_HASH too.
435 for line in phdrs.split("\n"):
436 if "SYMTAB" in line:
437 has_syms = True
438 if "GNU_HASH" in line or "MIPS_XHASH" in line:
439 sane = True
440 if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
441 sane = True
442 if has_syms and not sane:
443 path = package_qa_clean_path(path, d, name)
444 oe.qa.add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
445
446
447QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
448def package_qa_check_buildpaths(path, name, d, elf, messages):
449 """
450 Check for build paths inside target files and error if paths are not
451 explicitly ignored.
452 """
453 import stat
454
455 # Ignore symlinks/devs/fifos
456 mode = os.lstat(path).st_mode
457 if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
458 return
459
460 tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
461 with open(path, 'rb') as f:
462 file_content = f.read()
463 if tmpdir in file_content:
464 trimmed = path.replace(os.path.join(d.getVar("PKGDEST"), name), "")
465 oe.qa.add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
466
467
468QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
469def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
470 """
471 Check that all packages containing Xorg drivers have ABI dependencies
472 """
473
474 # Skip dev, dbg or nativesdk packages
475 if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
476 return
477
478 driverdir = d.expand("${libdir}/xorg/modules/drivers/")
479 if driverdir in path and path.endswith(".so"):
480 mlprefix = d.getVar('MLPREFIX') or ''
481 for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + name) or ""):
482 if rdep.startswith("%sxorg-abi-" % mlprefix):
483 return
484 oe.qa.add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
485
486QAPATHTEST[infodir] = "package_qa_check_infodir"
487def package_qa_check_infodir(path, name, d, elf, messages):
488 """
489 Check that /usr/share/info/dir isn't shipped in a particular package
490 """
491 infodir = d.expand("${infodir}/dir")
492
493 if infodir in path:
494 oe.qa.add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
495
496QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
497def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
498 """
499 Check that the package doesn't contain any absolute symlinks to the sysroot.
500 """
501 if os.path.islink(path):
502 target = os.readlink(path)
503 if os.path.isabs(target):
504 tmpdir = d.getVar('TMPDIR')
505 if target.startswith(tmpdir):
506 trimmed = path.replace(os.path.join(d.getVar("PKGDEST"), name), "")
507 oe.qa.add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
508
509# Check license variables
510do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
511python populate_lic_qa_checksum() {
512 """
513 Check for changes in the license files.
514 """
515
516 lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
517 lic = d.getVar('LICENSE')
518 pn = d.getVar('PN')
519
520 if lic == "CLOSED":
521 return
522
523 if not lic_files and d.getVar('SRC_URI'):
524 oe.qa.handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
525
526 srcdir = d.getVar('S')
527 corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
528 for url in lic_files.split():
529 try:
530 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
531 except bb.fetch.MalformedUrl:
532 oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
533 continue
534 srclicfile = os.path.join(srcdir, path)
535 if not os.path.isfile(srclicfile):
536 oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
537 continue
538
539 if (srclicfile == corebase_licensefile):
540 bb.warn("${COREBASE}/LICENSE is not a valid license file, please use '${COMMON_LICENSE_DIR}/MIT' for a MIT License file in LIC_FILES_CHKSUM. This will become an error in the future")
541
542 recipemd5 = parm.get('md5', '')
543 beginline, endline = 0, 0
544 if 'beginline' in parm:
545 beginline = int(parm['beginline'])
546 if 'endline' in parm:
547 endline = int(parm['endline'])
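# Typical entry being parsed here (hash elided, for illustration):
#   LIC_FILES_CHKSUM = "file://COPYING;md5=<hash>;beginline=1;endline=20"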
548
549 if (not beginline) and (not endline):
550 md5chksum = bb.utils.md5_file(srclicfile)
551 with open(srclicfile, 'r', errors='replace') as f:
552 license = f.read().splitlines()
553 else:
554 with open(srclicfile, 'rb') as f:
555 import hashlib
556 lineno = 0
557 license = []
558 m = hashlib.new('MD5', usedforsecurity=False)
559 for line in f:
560 lineno += 1
561 if (lineno >= beginline):
562 if ((lineno <= endline) or not endline):
563 m.update(line)
564 license.append(line.decode('utf-8', errors='replace').rstrip())
565 else:
566 break
567 md5chksum = m.hexdigest()
568 if recipemd5 == md5chksum:
569 bb.note(pn + ": md5 checksum matched for " + url)
570 else:
571 if recipemd5:
572 msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
573 msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
574 max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
575 if not license or license[-1] != '':
576 # Ensure that our license text ends with a line break
577 # (will be added with join() below).
578 license.append('')
579 remove = len(license) - max_lines
580 if remove > 0:
581 start = max_lines // 2
582 end = start + remove - 1
583 del license[start:end]
584 license.insert(start, '...')
585 msg = msg + "\n" + pn + ": Here is the selected license text:" + \
586 "\n" + \
587 "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
588 "\n" + "\n".join(license) + \
589 "{:^^70}".format(" endline=%d " % endline if endline else "")
590 if beginline:
591 if endline:
592 srcfiledesc = "%s (lines %d through %d)" % (srclicfile, beginline, endline)
593 else:
594 srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
595 elif endline:
596 srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
597 else:
598 srcfiledesc = srclicfile
599 msg = msg + "\n" + pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)
600
601 else:
602 msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
603 msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
604 oe.qa.handle_error("license-checksum", msg, d)
605
606 oe.qa.exit_if_errors(d)
607}
608
609def qa_check_staged(path,d):
610 """
611 Check staged la and pc files for common problems like references to the work
612 directory.
613
614 Because this runs after every staging step, the recipe responsible for an
615 error is easy to identify, even though every .pc and .la file is checked.
616 """
617
618 tmpdir = d.getVar('TMPDIR')
619 workdir = os.path.join(tmpdir, "work")
620 recipesysroot = d.getVar("RECIPE_SYSROOT")
621
622 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
623 pkgconfigcheck = workdir
624 else:
625 pkgconfigcheck = tmpdir
626
627 skip = (d.getVar('INSANE_SKIP') or "").split()
628 skip_la = False
629 if 'la' in skip:
630 bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
631 skip_la = True
632
633 skip_pkgconfig = False
634 if 'pkgconfig' in skip:
635 bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
636 skip_pkgconfig = True
637
638 skip_shebang_size = False
639 if 'shebang-size' in skip:
640 bb.note("Recipe %s skipping qa checking: shebang-size" % d.getVar('PN'))
641 skip_shebang_size = True
642
643 # find all .la and .pc files
644 # read the content
645 # and check for stuff that looks wrong
646 for root, dirs, files in os.walk(path):
647 for file in files:
648 path = os.path.join(root,file)
649 if file.endswith(".la") and not skip_la:
650 with open(path) as f:
651 file_content = f.read()
652 file_content = file_content.replace(recipesysroot, "")
653 if workdir in file_content:
654 error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
655 oe.qa.handle_error("la", error_msg, d)
656 elif file.endswith(".pc") and not skip_pkgconfig:
657 with open(path) as f:
658 file_content = f.read()
659 file_content = file_content.replace(recipesysroot, "")
660 if pkgconfigcheck in file_content:
661 error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
662 oe.qa.handle_error("pkgconfig", error_msg, d)
663
664 if not skip_shebang_size:
665 errors = {}
666 package_qa_check_shebang_size(path, "", d, None, errors)
667 for e in errors:
668 oe.qa.handle_error(e, errors[e], d)
669
670
671# Run all package-wide warnfuncs and errorfuncs
672def package_qa_package(warnfuncs, errorfuncs, package, d):
673 warnings = {}
674 errors = {}
675
676 for func in warnfuncs:
677 func(package, d, warnings)
678 for func in errorfuncs:
679 func(package, d, errors)
680
681 for w in warnings:
682 oe.qa.handle_error(w, warnings[w], d)
683 for e in errors:
684 oe.qa.handle_error(e, errors[e], d)
685
686 return len(errors) == 0
687
688# Run all recipe-wide warnfuncs and errorfuncs
689def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
690 warnings = {}
691 errors = {}
692
693 for func in warnfuncs:
694 func(pn, d, warnings)
695 for func in errorfuncs:
696 func(pn, d, errors)
697
698 for w in warnings:
699 oe.qa.handle_error(w, warnings[w], d)
700 for e in errors:
701 oe.qa.handle_error(e, errors[e], d)
702
703 return len(errors) == 0
704
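# Helper run via oe.utils.multiprocess_launch (see package_qa_walk below):
# it pre-runs "objdump -p" for each ELF in worker processes so the per-file
# checks can reuse the cached output via set_objdump().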
705def prepopulate_objdump_p(elf, d):
706 output = elf.run_objdump("-p", d)
707 return (elf.name, output)
708
709# Walk over all files in a directory and call func
710def package_qa_walk(warnfuncs, errorfuncs, package, d):
711 # look up the target OS/arch used by the per-file checks (e.g. the arch test)
712 target_os = d.getVar('HOST_OS')
713 target_arch = d.getVar('HOST_ARCH')
714
715 warnings = {}
716 errors = {}
717 elves = {}
718 for path in pkgfiles[package]:
719 elf = None
720 if os.path.isfile(path):
721 elf = oe.qa.ELFFile(path)
722 try:
723 elf.open()
724 elf.close()
725 except oe.qa.NotELFFileError:
726 elf = None
727 if elf:
728 elves[path] = elf
729
730 results = oe.utils.multiprocess_launch(prepopulate_objdump_p, elves.values(), d, extraargs=(d,))
731 for item in results:
732 elves[item[0]].set_objdump("-p", item[1])
733
734 for path in pkgfiles[package]:
735 if path in elves:
736 elves[path].open()
737 for func in warnfuncs:
738 func(path, package, d, elves.get(path), warnings)
739 for func in errorfuncs:
740 func(path, package, d, elves.get(path), errors)
741 if path in elves:
742 elves[path].close()
743
744 for w in warnings:
745 oe.qa.handle_error(w, warnings[w], d)
746 for e in errors:
747 oe.qa.handle_error(e, errors[e], d)
748
749def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
750 # Don't do this check for kernel/module recipes, there aren't too many debug/development
751 # packages and you can get false positives e.g. on kernel-module-lirc-dev
752 if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
753 return
754
755 if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
756 localdata = bb.data.createCopy(d)
757 localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES') + ':' + pkg)
758
759 # Now check the RDEPENDS
760 rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS') or "")
761
762 # Now do the sanity check!!!
763 if "build-deps" not in skip:
764 for rdepend in rdepends:
765 if "-dbg" in rdepend and "debug-deps" not in skip:
766 error_msg = "%s rdepends on %s" % (pkg,rdepend)
767 oe.qa.handle_error("debug-deps", error_msg, d)
768 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
769 error_msg = "%s rdepends on %s" % (pkg, rdepend)
770 oe.qa.handle_error("dev-deps", error_msg, d)
771 if rdepend not in packages:
772 rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
773 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
774 continue
775 if not rdep_data or not 'PN' in rdep_data:
776 pkgdata_dir = d.getVar("PKGDATA_DIR")
777 try:
778 possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
779 except OSError:
780 possibles = []
781 for p in possibles:
782 rdep_data = oe.packagedata.read_subpkgdata(p, d)
783 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
784 break
785 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
786 continue
787 if rdep_data and 'PN' in rdep_data:
788 error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
789 else:
790 error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
791 oe.qa.handle_error("build-deps", error_msg, d)
792
793 if "file-rdeps" not in skip:
794 ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
795 if bb.data.inherits_class('nativesdk', d):
796 ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl', 'perl'])
797 # For saving the FILERDEPENDS
798 filerdepends = {}
799 rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
800 for key in rdep_data:
801 if key.startswith("FILERDEPENDS:"):
802 for subkey in bb.utils.explode_deps(rdep_data[key]):
803 if subkey not in ignored_file_rdeps and \
804 not subkey.startswith('perl('):
805 # We already know it starts with FILERDEPENDS:
806 filerdepends[subkey] = key[13:]
807
808 if filerdepends:
809 done = rdepends[:]
810 # Add the rprovides of itself
811 if pkg not in done:
812 done.insert(0, pkg)
813
814 # python is not a package itself, but python-core provides it, so
815 # skip checking /usr/bin/python if python is in the rdeps, in
816 # case there is a RDEPENDS:pkg = "python" in the recipe.
817 for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
818 if py in done:
819 filerdepends.pop("/usr/bin/python",None)
820 done.remove(py)
821 for rdep in done:
822 # The file dependencies may contain package names, e.g.,
823 # perl
824 filerdepends.pop(rdep,None)
825
826 # For saving the FILERPROVIDES, RPROVIDES and FILES_INFO
827 rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
828 for key in rdep_data:
829 if key.startswith("FILERPROVIDES:") or key.startswith("RPROVIDES:"):
830 for subkey in bb.utils.explode_deps(rdep_data[key]):
831 filerdepends.pop(subkey,None)
832 # Add the files list to the rprovides
833 if key.startswith("FILES_INFO:"):
834 # Use eval() to turn it into a dict
835 for subkey in eval(rdep_data[key]):
836 filerdepends.pop(subkey,None)
837 if not filerdepends:
838 # Break if all the file rdepends are met
839 break
840 if filerdepends:
841 for key in filerdepends:
842 error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS:%s?" % \
843 (filerdepends[key].replace(":%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
844 oe.qa.handle_error("file-rdeps", error_msg, d)
845package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
846
847def package_qa_check_deps(pkg, pkgdest, d):
848
849 localdata = bb.data.createCopy(d)
850 localdata.setVar('OVERRIDES', pkg)
851
852 def check_valid_deps(var):
853 try:
854 rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
855 except ValueError as e:
856 bb.fatal("%s:%s: %s" % (var, pkg, e))
857 for dep in rvar:
858 for v in rvar[dep]:
859 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
860 error_msg = "%s:%s is invalid: %s (%s) - only the comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
861 oe.qa.handle_error("dep-cmp", error_msg, d)
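# For illustration: RDEPENDS:pkg = "foo (>= 1.2)" explodes to
# {"foo": [">= 1.2"]} and passes, while "foo (~ 1.2)" would be flagged.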
862
863 check_valid_deps('RDEPENDS')
864 check_valid_deps('RRECOMMENDS')
865 check_valid_deps('RSUGGESTS')
866 check_valid_deps('RPROVIDES')
867 check_valid_deps('RREPLACES')
868 check_valid_deps('RCONFLICTS')
869
870QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
871def package_qa_check_usrmerge(pkg, d, messages):
872
873 pkgdest = d.getVar('PKGDEST')
874 pkg_dir = pkgdest + os.sep + pkg + os.sep
875 merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
876 for f in merged_dirs:
877 if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
878 msg = "%s package is not obeying the usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
879 oe.qa.add_message(messages, "usrmerge", msg)
880 return False
881 return True
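# e.g. with the usrmerge distro feature enabled, /bin, /sbin and /lib must
# be symlinks into /usr, so a real directory at any of them fails the test.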
882
883QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
884def package_qa_check_perllocalpod(pkg, d, messages):
885 """
886 Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
887 installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
888 handle this for most recipes.
889 """
890 import glob
891 pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
892 podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
893
894 matches = glob.glob(podpath)
895 if matches:
896 matches = [package_qa_clean_path(path, d, pkg) for path in matches]
897 msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
898 oe.qa.add_message(messages, "perllocalpod", msg)
899
900QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
901def package_qa_check_expanded_d(package, d, messages):
902 """
903 Check for the expanded D (${D}) value in pkg_* and FILES
904 variables, warn the user to use it correctly.
905 """
906 sane = True
907 expanded_d = d.getVar('D')
908
909 for var in ('FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
910 bbvar = d.getVar(var + ":" + package) or ""
911 if expanded_d in bbvar:
912 if var == 'FILES':
913 oe.qa.add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
914 sane = False
915 else:
916 oe.qa.add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
917 sane = False
918 return sane
919
920QAPKGTEST[unlisted-pkg-lics] = "package_qa_check_unlisted_pkg_lics"
921def package_qa_check_unlisted_pkg_lics(package, d, messages):
922 """
923 Check that all licenses for a package are among the licenses for the recipe.
924 """
925 pkg_lics = d.getVar('LICENSE:' + package)
926 if not pkg_lics:
927 return True
928
929 recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
930 package_lics = oe.license.list_licenses(pkg_lics)
931 unlisted = package_lics - recipe_lics_set
932 if unlisted:
933 oe.qa.add_message(messages, "unlisted-pkg-lics",
934 "LICENSE:%s includes licenses (%s) that are not "
935 "listed in LICENSE" % (package, ' '.join(unlisted)))
936 return False
937 obsolete = set(oe.license.obsolete_license_list()) & package_lics - recipe_lics_set
938 if obsolete:
939 oe.qa.add_message(messages, "obsolete-license",
940 "LICENSE:%s includes obsolete licenses %s" % (package, ' '.join(obsolete)))
941 return False
942 return True
943
944QAPKGTEST[empty-dirs] = "package_qa_check_empty_dirs"
945def package_qa_check_empty_dirs(pkg, d, messages):
946 """
947 Check for the existence of files in directories that are expected to be
948 empty.
949 """
950
951 pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
952 for dir in (d.getVar('QA_EMPTY_DIRS') or "").split():
953 empty_dir = oe.path.join(pkgd, dir)
954 if os.path.exists(empty_dir) and os.listdir(empty_dir):
955 recommendation = (d.getVar('QA_EMPTY_DIRS_RECOMMENDATION:' + dir) or
956 "but it is expected to be empty")
957 msg = "%s installs files in %s, %s" % (pkg, dir, recommendation)
958 oe.qa.add_message(messages, "empty-dirs", msg)
959
960def package_qa_check_encoding(keys, encode, d):
961 def check_encoding(key, enc):
962 sane = True
963 value = d.getVar(key)
964 if value:
965 try:
966 value.encode(enc)
967 except UnicodeEncodeError:
968 error_msg = "%s has non-%s characters" % (key, enc)
969 sane = False
970 oe.qa.handle_error("invalid-chars", error_msg, d)
971 return sane
972
973 for key in keys:
974 sane = check_encoding(key, encode)
975 if not sane:
976 break
977
978HOST_USER_UID := "${@os.getuid()}"
979HOST_USER_GID := "${@os.getgid()}"
980
981QAPATHTEST[host-user-contaminated] = "package_qa_check_host_user"
982def package_qa_check_host_user(path, name, d, elf, messages):
983 """Check for paths outside of /home which are owned by the user running bitbake."""
984
985 if not os.path.lexists(path):
986 return
987
988 dest = d.getVar('PKGDEST')
989 pn = d.getVar('PN')
990 home = os.path.join(dest, name, 'home')
991 if path == home or path.startswith(home + os.sep):
992 return
993
994 try:
995 stat = os.lstat(path)
996 except OSError as exc:
997 import errno
998 if exc.errno != errno.ENOENT:
999 raise
1000 else:
1001 check_uid = int(d.getVar('HOST_USER_UID'))
1002 if stat.st_uid == check_uid:
1003 oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
1004 return False
1005
1006 check_gid = int(d.getVar('HOST_USER_GID'))
1007 if stat.st_gid == check_gid:
1008 oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
1009 return False
1010 return True
1011
1012QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
1013def package_qa_check_unhandled_features_check(pn, d, messages):
1014 if not bb.data.inherits_class('features_check', d):
1015 var_set = False
1016 for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
1017 for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
1018 if d.getVar(var) is not None or d.hasOverrides(var):
1019 var_set = True
1020 if var_set:
1021 oe.qa.handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
1022
1023QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
1024def package_qa_check_missing_update_alternatives(pn, d, messages):
1025 # Look at all packages and find out if any of those sets ALTERNATIVE variable
1026 # without inheriting update-alternatives class
1027 for pkg in (d.getVar('PACKAGES') or '').split():
1028 if d.getVar('ALTERNATIVE:%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
1029 oe.qa.handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE:%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
1030
1031# The PACKAGE FUNC to scan each package
1032python do_package_qa () {
1033 import subprocess
1034 import oe.packagedata
1035
1036 bb.note("DO PACKAGE QA")
1037
1038 main_lic = d.getVar('LICENSE')
1039
1040 # Check for obsolete license references in main LICENSE (packages are checked below for any changes)
1041 main_licenses = oe.license.list_licenses(d.getVar('LICENSE'))
1042 obsolete = set(oe.license.obsolete_license_list()) & main_licenses
1043 if obsolete:
1044 oe.qa.handle_error("obsolete-license", "Recipe LICENSE includes obsolete licenses %s" % ' '.join(obsolete), d)
1045
1046 bb.build.exec_func("read_subpackage_metadata", d)
1047
1048 # Check non UTF-8 characters on recipe's metadata
1049 package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
1050
1051 logdir = d.getVar('T')
1052 pn = d.getVar('PN')
1053
1054 # Scan the packages...
1055 pkgdest = d.getVar('PKGDEST')
1056 packages = set((d.getVar('PACKAGES') or '').split())
1057
1058 global pkgfiles
1059 pkgfiles = {}
1060 for pkg in packages:
1061 pkgfiles[pkg] = []
1062 pkgdir = os.path.join(pkgdest, pkg)
1063 for walkroot, dirs, files in os.walk(pkgdir):
1064 # Don't walk into top-level CONTROL or DEBIAN directories as these
1065 # are temporary directories created by do_package.
1066 if walkroot == pkgdir:
1067 for control in ("CONTROL", "DEBIAN"):
1068 if control in dirs:
1069 dirs.remove(control)
1070 for file in files:
1071 pkgfiles[pkg].append(os.path.join(walkroot, file))
1072
1073 # if there are no packages, there is nothing to scan
1074 if not packages:
1075 return
1076
1077 import re
1078 # Package names must match the [a-z0-9.+-]+ regular expression
1079 pkgname_pattern = re.compile(r"^[a-z0-9.+-]+$")
1080
1081 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
1082 taskdeps = set()
1083 for dep in taskdepdata:
1084 taskdeps.add(taskdepdata[dep][0])
1085
1086 def parse_test_matrix(matrix_name):
1087 testmatrix = d.getVarFlags(matrix_name) or {}
1088 g = globals()
1089 warnchecks = []
1090 for w in (d.getVar("WARN_QA") or "").split():
1091 if w in skip:
1092 continue
1093 if w in testmatrix and testmatrix[w] in g:
1094 warnchecks.append(g[testmatrix[w]])
1095
1096 errorchecks = []
1097 for e in (d.getVar("ERROR_QA") or "").split():
1098 if e in skip:
1099 continue
1100 if e in testmatrix and testmatrix[e] in g:
1101 errorchecks.append(g[testmatrix[e]])
1102 return warnchecks, errorchecks
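# For illustration: QAPATHTEST[textrel] = "package_qa_textrel" (defined
# above) maps the "textrel" name in WARN_QA/ERROR_QA to that function.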
1103
1104 for package in packages:
1105 skip = set((d.getVar('INSANE_SKIP') or "").split() +
1106 (d.getVar('INSANE_SKIP:' + package) or "").split())
1107 if skip:
1108 bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
1109
1110 bb.note("Checking Package: %s" % package)
1111 # Check package name
1112 if not pkgname_pattern.match(package):
1113 oe.qa.handle_error("pkgname",
1114 "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
1115
1116 warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
1117 package_qa_walk(warn_checks, error_checks, package, d)
1118
1119 warn_checks, error_checks = parse_test_matrix("QAPKGTEST")
1120 package_qa_package(warn_checks, error_checks, package, d)
1121
1122 package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
1123 package_qa_check_deps(package, pkgdest, d)
1124
1125 warn_checks, error_checks = parse_test_matrix("QARECIPETEST")
1126 package_qa_recipe(warn_checks, error_checks, pn, d)
1127
1128 if 'libdir' in d.getVar("ALL_QA").split():
1129 package_qa_check_libdir(d)
1130
1131 oe.qa.exit_if_errors(d)
1132}
1133
1134# binutils is used for most checks, so need to set as dependency
1135# POPULATESYSROOTDEPS is defined in staging class.
1136do_package_qa[depends] += "${POPULATESYSROOTDEPS}"
1137do_package_qa[vardeps] = "${@bb.utils.contains('ERROR_QA', 'empty-dirs', 'QA_EMPTY_DIRS', '', d)}"
1138do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
1139do_package_qa[rdeptask] = "do_packagedata"
1140addtask do_package_qa after do_packagedata do_package before do_build
1141
1142# Add the package specific INSANE_SKIPs to the sstate dependencies
1143python() {
1144 pkgs = (d.getVar('PACKAGES') or '').split()
1145 for pkg in pkgs:
1146 d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP:{}".format(pkg))
1147}
1148
1149SSTATETASKS += "do_package_qa"
1150do_package_qa[sstate-inputdirs] = ""
1151do_package_qa[sstate-outputdirs] = ""
1152python do_package_qa_setscene () {
1153 sstate_setscene(d)
1154}
1155addtask do_package_qa_setscene
1156
1157python do_qa_sysroot() {
1158 bb.note("QA checking do_populate_sysroot")
1159 sysroot_destdir = d.expand('${SYSROOT_DESTDIR}')
1160 for sysroot_dir in d.expand('${SYSROOT_DIRS}').split():
1161 qa_check_staged(sysroot_destdir + sysroot_dir, d)
1162 oe.qa.exit_with_message_if_errors("do_populate_sysroot for this recipe installed files with QA issues", d)
1163}
1164do_populate_sysroot[postfuncs] += "do_qa_sysroot"
1165
1166python do_qa_patch() {
1167 import subprocess
1168
1169 ###########################################################################
1170 # Check patch.log for fuzz warnings
1171 #
1172 # Further information on why we check for patch fuzz warnings:
1173 # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
1174 # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
1175 ###########################################################################
1176
1177 logdir = d.getVar('T')
1178 patchlog = os.path.join(logdir,"log.do_patch")
1179
1180 if os.path.exists(patchlog):
1181 fuzzheader = '--- Patch fuzz start ---'
1182 fuzzfooter = '--- Patch fuzz end ---'
1183 statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
1184 if subprocess.call(statement, shell=True) == 0:
1185 msg = "Fuzz detected:\n\n"
1186 fuzzmsg = ""
1187 inFuzzInfo = False
1188 f = open(patchlog, "r")
1189 for line in f:
1190 if fuzzheader in line:
1191 inFuzzInfo = True
1192 fuzzmsg = ""
1193 elif fuzzfooter in line:
1194 fuzzmsg = fuzzmsg.replace('\n\n', '\n')
1195 msg += fuzzmsg
1196 msg += "\n"
1197 inFuzzInfo = False
1198 elif inFuzzInfo and not 'Now at patch' in line:
1199 fuzzmsg += line
1200 f.close()
1201 msg += "The context lines in the patches can be updated with devtool:\n"
1202 msg += "\n"
1203 msg += " devtool modify %s\n" % d.getVar('PN')
1204 msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
1205 msg += "Don't forget to review changes done by devtool!\n"
1206 if bb.utils.filter('ERROR_QA', 'patch-fuzz', d):
1207 bb.error(msg)
1208 elif bb.utils.filter('WARN_QA', 'patch-fuzz', d):
1209 bb.warn(msg)
1210 msg = "Patch log indicates that patches do not apply cleanly."
1211 oe.qa.handle_error("patch-fuzz", msg, d)
1212
1213 # Check if the patch contains a correctly formatted and spelled Upstream-Status
1214 import re
1215 from oe import patch
1216
1217 for url in patch.src_patches(d):
1218 (_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
1219
1220 # skip patches not in oe-core
1221 if '/meta/' not in fullpath:
1222 continue
1223
1224 kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
1225 strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Accepted|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
1226 guidelines = "https://www.openembedded.org/wiki/Commit_Patch_Message_Guidelines#Patch_Header_Recommendations:_Upstream-Status"
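# A line accepted by strict_status_re, for illustration (URL hypothetical):
#   Upstream-Status: Submitted [https://lists.example.org/...]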
1227
1228 with open(fullpath, encoding='utf-8', errors='ignore') as f:
1229 file_content = f.read()
1230 match_kinda = kinda_status_re.search(file_content)
1231 match_strict = strict_status_re.search(file_content)
1232
1233 if not match_strict:
1234 if match_kinda:
1235 bb.error("Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, match_kinda.group(0)))
1236 else:
1237 bb.error("Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines))
1238}
1239
1240python do_qa_configure() {
1241 import subprocess
1242
1243 ###########################################################################
1244 # Check config.log for cross compile issues
1245 ###########################################################################
1246
1247 configs = []
1248 workdir = d.getVar('WORKDIR')
1249
1250 skip = (d.getVar('INSANE_SKIP') or "").split()
1251 skip_configure_unsafe = False
1252 if 'configure-unsafe' in skip:
1253 bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
1254 skip_configure_unsafe = True
1255
1256 if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
1257 bb.note("Checking autotools environment for common misconfiguration")
1258 for root, dirs, files in os.walk(workdir):
1259 statement = "grep -q -F -e 'is unsafe for cross-compilation' %s" % \
1260 os.path.join(root,"config.log")
1261 if "config.log" in files:
1262 if subprocess.call(statement, shell=True) == 0:
1263 error_msg = """This autoconf log indicates errors: it looked at host include and/or library paths while determining system capabilities.
1264Rerun configure task after fixing this."""
1265 oe.qa.handle_error("configure-unsafe", error_msg, d)
1266
1267 if "configure.ac" in files:
1268 configs.append(os.path.join(root,"configure.ac"))
1269 if "configure.in" in files:
1270 configs.append(os.path.join(root, "configure.in"))
1271
1272 ###########################################################################
1273 # Check gettext configuration and dependencies are correct
1274 ###########################################################################
1275
1276 skip_configure_gettext = False
1277 if 'configure-gettext' in skip:
1278 bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
1279 skip_configure_gettext = True
1280
1281 cnf = d.getVar('EXTRA_OECONF') or ""
1282 if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
1283 "--disable-nls" in cnf or skip_configure_gettext):
1284 ml = d.getVar("MLPREFIX") or ""
1285 if bb.data.inherits_class('cross-canadian', d):
1286 gt = "nativesdk-gettext"
1287 else:
1288 gt = "gettext-native"
1289 deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
1290 if gt not in deps:
1291 for config in configs:
1292 gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
1293 if subprocess.call(gnu, shell=True) == 0:
1294 error_msg = "AM_GNU_GETTEXT used but gettext is not inherited"
1295 oe.qa.handle_error("configure-gettext", error_msg, d)
1296
1297 ###########################################################################
1298 # Check unrecognised configure options (with an ignore list)
1299 ###########################################################################
1300 if bb.data.inherits_class("autotools", d):
1301 bb.note("Checking configure output for unrecognised options")
1302 try:
1303 if bb.data.inherits_class("autotools", d):
1304 flag = "WARNING: unrecognized options:"
1305 log = os.path.join(d.getVar('B'), 'config.log')
1306 output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
1307 options = set()
1308 for line in output.splitlines():
1309 options |= set(line.partition(flag)[2].split())
1310 ignore_opts = set(d.getVar("UNKNOWN_CONFIGURE_OPT_IGNORE").split())
1311 options -= ignore_opts
1312 if options:
1313 pn = d.getVar('PN')
1314 error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
1315 oe.qa.handle_error("unknown-configure-option", error_msg, d)
1316 except subprocess.CalledProcessError:
1317 pass
1318
1319 # Check invalid PACKAGECONFIG
1320 pkgconfig = (d.getVar("PACKAGECONFIG") or "").split()
1321 if pkgconfig:
1322 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
1323 for pconfig in pkgconfig:
1324 if pconfig not in pkgconfigflags:
1325 pn = d.getVar('PN')
1326 error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
1327 oe.qa.handle_error("invalid-packageconfig", error_msg, d)
1328
1329 oe.qa.exit_if_errors(d)
1330}
1331
1332def unpack_check_src_uri(pn, d):
1333 import re
1334
1335 skip = (d.getVar('INSANE_SKIP') or "").split()
1336 if 'src-uri-bad' in skip:
1337 bb.note("Recipe %s skipping qa checking: src-uri-bad" % d.getVar('PN'))
1338 return
1339
1340 if "${PN}" in d.getVar("SRC_URI", False):
1341 oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
1342
1343 for url in d.getVar("SRC_URI").split():
1344 # Search for github and gitlab URLs that pull unstable archives (comment for future greppers)
1345 if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url):
1346 oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
1347
1348python do_qa_unpack() {
1349 src_uri = d.getVar('SRC_URI')
1350 s_dir = d.getVar('S')
1351 if src_uri and not os.path.exists(s_dir):
1352 bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
1353
1354 unpack_check_src_uri(d.getVar('PN'), d)
1355}
1356
1357# Check for patch fuzz
1358do_patch[postfuncs] += "do_qa_patch "
1359
1360# Check broken config.log files, for packages requiring Gettext which
1361# don't have it in DEPENDS.
1362#addtask qa_configure after do_configure before do_compile
1363do_configure[postfuncs] += "do_qa_configure "
1364
1365# Check does S exist.
1366do_unpack[postfuncs] += "do_qa_unpack"
1367
1368python () {
1369 import re
1370
1371 tests = d.getVar('ALL_QA').split()
1372 if "desktop" in tests:
1373 d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
1374
1375 ###########################################################################
1376 # Check various variables
1377 ###########################################################################
1378
1379 # Checking ${FILESEXTRAPATHS}
1380 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
1381 if '__default' not in extrapaths.split(":"):
1382 msg = "The FILESEXTRAPATHS variable must always use the :prepend (or :append)\n"
1383 msg += "type of assignment, and don't forget the colon.\n"
1384 msg += "Please assign it with the format of:\n"
1385 msg += " FILESEXTRAPATHS:append := \":${THISDIR}/Your_Files_Path\" or\n"
1386 msg += " FILESEXTRAPATHS:prepend := \"${THISDIR}/Your_Files_Path:\"\n"
1387 msg += "in your bbappend file\n\n"
1388 msg += "Your incorrect assignment is:\n"
1389 msg += "%s\n" % extrapaths
1390 bb.warn(msg)
1391
1392 overrides = d.getVar('OVERRIDES').split(':')
1393 pn = d.getVar('PN')
1394 if pn in overrides:
1395 msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
1396 oe.qa.handle_error("pn-overrides", msg, d)
1397 prog = re.compile(r'[A-Z]')
1398 if prog.search(pn):
1399 oe.qa.handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behaviour.' % pn, d)
1400
1401 # Some people mistakenly use DEPENDS:${PN} instead of DEPENDS and wonder
1402 # why it doesn't work.
1403 if (d.getVar(d.expand('DEPENDS:${PN}'))):
1404 oe.qa.handle_error("pkgvarcheck", "recipe uses DEPENDS:${PN}, should use DEPENDS", d)
1405
1406 issues = []
1407 if (d.getVar('PACKAGES') or "").split():
1408 for dep in (d.getVar('QADEPENDS') or "").split():
1409 d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
1410 for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
1411 if d.getVar(var, False):
1412 issues.append(var)
1413
1414 fakeroot_tests = d.getVar('FAKEROOT_QA').split()
1415 if set(tests) & set(fakeroot_tests):
1416 d.setVarFlag('do_package_qa', 'fakeroot', '1')
1417 d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
1418 else:
1419 d.setVarFlag('do_package_qa', 'rdeptask', '')
1420 for i in issues:
1421 oe.qa.handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
1422
1423 if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
1424 for native_class in ['native', 'nativesdk']:
1425 if bb.data.inherits_class(native_class, d):
1426
1427 inherited_classes = d.getVar('__inherit_cache', False) or []
1428 needle = "/" + native_class
1429
1430 bbclassextend = (d.getVar('BBCLASSEXTEND') or '').split()
1431 # BBCLASSEXTEND items are always added at the end
1432 skip_classes = bbclassextend
1433 if bb.data.inherits_class('native', d) or 'native' in bbclassextend:
1434 # native also inherits nopackages and relocatable bbclasses
1435 skip_classes.extend(['nopackages', 'relocatable'])
1436
1437 broken_order = []
1438 for class_item in reversed(inherited_classes):
1439 if needle not in class_item:
1440 for extend_item in skip_classes:
1441 if '/%s.bbclass' % extend_item in class_item:
1442 break
1443 else:
1444 pn = d.getVar('PN')
1445 broken_order.append(os.path.basename(class_item))
1446 else:
1447 break
1448 if broken_order:
1449 oe.qa.handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
1450 "Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
1451
1452 oe.qa.exit_if_errors(d)
1453}
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
deleted file mode 100644
index 6f5d3bde6c..0000000000
--- a/meta/classes/kernel-arch.bbclass
+++ /dev/null
@@ -1,74 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# set the ARCH environment variable for kernel compilation (including
9# modules). return value must match one of the architecture directories
10# in the kernel source "arch" directory
11#
12
13valid_archs = "alpha cris ia64 \
14 i386 x86 \
15 m68knommu m68k ppc powerpc powerpc64 ppc64 \
16 sparc sparc64 \
17 arm aarch64 \
18 m32r mips \
19 sh sh64 um h8300 \
20 parisc s390 v850 \
21 avr32 blackfin \
22 microblaze \
23 nios2 arc riscv xtensa"
24
25def map_kernel_arch(a, d):
26 import re
27
28 valid_archs = d.getVar('valid_archs').split()
29
30 if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
31 elif re.match('arceb$', a): return 'arc'
32 elif re.match('armeb$', a): return 'arm'
33 elif re.match('aarch64$', a): return 'arm64'
34 elif re.match('aarch64_be$', a): return 'arm64'
35 elif re.match('aarch64_ilp32$', a): return 'arm64'
36 elif re.match('aarch64_be_ilp32$', a): return 'arm64'
37 elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
38 elif re.match('mcf', a): return 'm68k'
39 elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
40 elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
41 elif re.match('sh(3|4)$', a): return 'sh'
42 elif re.match('bfin', a): return 'blackfin'
43 elif re.match('microblazee[bl]', a): return 'microblaze'
44 elif a in valid_archs: return a
45 else:
46 if not d.getVar("TARGET_OS").startswith("linux"):
47 return a
48 bb.error("cannot map '%s' to a linux kernel architecture" % a)
49
50export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}"
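# For illustration: TARGET_ARCH "x86_64" maps to ARCH "x86", "aarch64" to
# "arm64" and "mips64el" to "mips".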
51
52def map_uboot_arch(a, d):
53 import re
54
55 if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
56 elif re.match('i.86$', a): return 'x86'
57 return a
58
59export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}"
60
61# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
62# specific options necessary for building the kernel and modules.
63TARGET_CC_KERNEL_ARCH ?= ""
64HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
65TARGET_LD_KERNEL_ARCH ?= ""
66HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
67TARGET_AR_KERNEL_ARCH ?= ""
68HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
69
70KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
71KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
72KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
73TOOLCHAIN = "gcc"
74
diff --git a/meta/classes/kernel-artifact-names.bbclass b/meta/classes/kernel-artifact-names.bbclass
deleted file mode 100644
index 311075c68d..0000000000
--- a/meta/classes/kernel-artifact-names.bbclass
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7##################################################################
8# Specific kernel creation info
9# for recipes/bbclasses which need to reuse some of the kernel
10# artifacts, but aren't kernel recipes themselves
11##################################################################
12
13inherit image-artifact-names
14
15KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
16KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
17KERNEL_ARTIFACT_BIN_EXT ?= ".bin"
18
19KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
20KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
21KERNEL_IMAGE_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
22KERNEL_IMAGETYPE_SYMLINK ?= "1"
23
24KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
25KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
26KERNEL_DTB_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
27
28KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
29KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
30KERNEL_FIT_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
31
32MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
33MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
34MODULE_TARBALL_DEPLOY ?= "1"
35
36INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}"
37INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}"
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
deleted file mode 100644
index b2117de805..0000000000
--- a/meta/classes/kernel-devicetree.bbclass
+++ /dev/null
@@ -1,119 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Support for device tree generation
8python () {
9 if not bb.data.inherits_class('nopackages', d):
10 d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree")
11 if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1':
12 d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
13}
14
15FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
16FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
17
18# Generate kernel+devicetree bundle
19KERNEL_DEVICETREE_BUNDLE ?= "0"
20
21# dtc flags passed via DTC_FLAGS env variable
22KERNEL_DTC_FLAGS ?= ""
23
24normalize_dtb () {
25 dtb="$1"
26 if echo $dtb | grep -q '/dts/'; then
27 bbwarn "$dtb contains the full path to the dts file, but only the dtb name should be used."
28 dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'`
29 fi
30 echo "$dtb"
31}
32
33get_real_dtb_path_in_kernel () {
34 dtb="$1"
35 dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb"
36 if [ ! -e "$dtb_path" ]; then
37 dtb_path="${B}/arch/${ARCH}/boot/$dtb"
38 fi
39 echo "$dtb_path"
40}
41
42do_configure:append() {
43 if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
44 if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
45 case "${ARCH}" in
46 "arm")
47 config="${B}/.config"
48 if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then
49 bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!'
50 sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config
51 echo "CONFIG_ARM_APPENDED_DTB=y" >> $config
52 echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config
53 fi
54 ;;
55 *)
56 bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'."
57 esac
58 else
59 bberror 'The KERNEL_DEVICETREE_BUNDLE requires the KERNEL_IMAGETYPE to contain zImage.'
60 fi
61 fi
62}
63
64do_compile:append() {
65 if [ -n "${KERNEL_DTC_FLAGS}" ]; then
66 export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
67 fi
68
69 for dtbf in ${KERNEL_DEVICETREE}; do
70 dtb=`normalize_dtb "$dtbf"`
71 oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
72 done
73}
74
75do_install:append() {
76 for dtbf in ${KERNEL_DEVICETREE}; do
77 dtb=`normalize_dtb "$dtbf"`
78 dtb_ext=${dtb##*.}
79 dtb_base_name=`basename $dtb .$dtb_ext`
80 dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
81 install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
82 done
83}
84
85do_deploy:append() {
86 for dtbf in ${KERNEL_DEVICETREE}; do
87 dtb=`normalize_dtb "$dtbf"`
88 dtb_ext=${dtb##*.}
89 dtb_base_name=`basename $dtb .$dtb_ext`
90 install -d $deployDir
91 install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
92 if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
93 ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
94 fi
95 if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then
96 ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
97 fi
98 for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
99 if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
100 cat ${D}/${KERNEL_IMAGEDEST}/$type \
101 $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
102 > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
103 if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
104 ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
105 $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
106 fi
107 if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
108 cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
109 $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
110 > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
111 if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
112 ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
113 $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
114 fi
115 fi
116 fi
117 done
118 done
119}
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
deleted file mode 100644
index 838ce204cb..0000000000
--- a/meta/classes/kernel-fitimage.bbclass
+++ /dev/null
@@ -1,803 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-uboot kernel-artifact-names uboot-sign
8
9def get_fit_replacement_type(d):
10 kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
11 replacementtype = ""
12 if 'fitImage' in kerneltypes.split():
13 uarch = d.getVar("UBOOT_ARCH")
14 if uarch == "arm64":
15 replacementtype = "Image"
16 elif uarch == "riscv":
17 replacementtype = "Image"
18 elif uarch == "mips":
19 replacementtype = "vmlinuz.bin"
20 elif uarch == "x86":
21 replacementtype = "bzImage"
22 elif uarch == "microblaze":
23 replacementtype = "linux.bin"
24 else:
25 replacementtype = "zImage"
26 return replacementtype
27
28KERNEL_IMAGETYPE_REPLACEMENT ?= "${@get_fit_replacement_type(d)}"
29DEPENDS:append = " ${@'u-boot-tools-native dtc-native' if 'fitImage' in (d.getVar('KERNEL_IMAGETYPES') or '').split() else ''}"
30
31python __anonymous () {
32 # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
33 # to kernel.bbclass. We have to override it, since we pack zImage
34 # (at least for now) into the fitImage.
35 typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
36 if 'fitImage' in typeformake.split():
37 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', d.getVar('KERNEL_IMAGETYPE_REPLACEMENT')))
38
39 image = d.getVar('INITRAMFS_IMAGE')
40 if image:
41 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
42
43 ubootenv = d.getVar('UBOOT_ENV')
44 if ubootenv:
45 d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/bootloader:do_populate_sysroot')
46
47 #check if there are any dtb providers
48 providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
49 if providerdtb:
50 d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
51 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
52 d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
53
54 # Verified boot will sign the fitImage and append the public key to
55 # U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
56 # the fitImage:
57 if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
58 uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
59 d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
60 if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
61 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
62}
63
64
65# Description string
66FIT_DESC ?= "Kernel fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
67
68# Sign individual images as well
69FIT_SIGN_INDIVIDUAL ?= "0"
70
71FIT_CONF_PREFIX ?= "conf-"
72FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
73
74FIT_SUPPORTED_INITRAMFS_FSTYPES ?= "cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio"
75
76# Keys used to sign image nodes individually.
77# The keys used to sign image nodes must be different from those used to sign
78# configuration nodes, otherwise the "required" property, from
79# UBOOT_DTB_BINARY, will be set to "conf", because "conf" prevails over "image".
80# Image signature checking would then not be mandatory and no error would be
81# raised in case of failure.
82# UBOOT_SIGN_IMG_KEYNAME = "dev2" # key name in keydir (e.g. "dev2.crt", "dev2.key")
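#
# Illustrative sketch (key names and keydir path are hypothetical): a build
# signing both configuration and image nodes might set:
#   UBOOT_SIGN_ENABLE = "1"
#   FIT_SIGN_INDIVIDUAL = "1"
#   UBOOT_SIGN_KEYDIR = "/path/to/keys"
#   UBOOT_SIGN_KEYNAME = "dev"       # signs configuration nodes
#   UBOOT_SIGN_IMG_KEYNAME = "dev2"  # signs image nodes (must differ)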
83
84#
85# Emit the fitImage ITS header
86#
87# $1 ... .its filename
88fitimage_emit_fit_header() {
89 cat << EOF >> $1
90/dts-v1/;
91
92/ {
93 description = "${FIT_DESC}";
94 #address-cells = <1>;
95EOF
96}
97
98#
99# Emit the fitImage section bits
100#
101# $1 ... .its filename
102# $2 ... Section bit type: imagestart - image section start
103# confstart - configuration section start
104# sectend - section end
105# fitend - fitimage end
106#
107fitimage_emit_section_maint() {
108 case $2 in
109 imagestart)
110 cat << EOF >> $1
111
112 images {
113EOF
114 ;;
115 confstart)
116 cat << EOF >> $1
117
118 configurations {
119EOF
120 ;;
121 sectend)
122 cat << EOF >> $1
123 };
124EOF
125 ;;
126 fitend)
127 cat << EOF >> $1
128};
129EOF
130 ;;
131 esac
132}
133
134#
135# Emit the fitImage ITS kernel section
136#
137# $1 ... .its filename
138# $2 ... Image counter
139# $3 ... Path to kernel image
140# $4 ... Compression type
141fitimage_emit_section_kernel() {
142
143 kernel_csum="${FIT_HASH_ALG}"
144 kernel_sign_algo="${FIT_SIGN_ALG}"
145 kernel_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
146
147 ENTRYPOINT="${UBOOT_ENTRYPOINT}"
148 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
149 ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
150 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
151 fi
152
153 cat << EOF >> $1
154 kernel-$2 {
155 description = "Linux kernel";
156 data = /incbin/("$3");
157 type = "${UBOOT_MKIMAGE_KERNEL_TYPE}";
158 arch = "${UBOOT_ARCH}";
159 os = "linux";
160 compression = "$4";
161 load = <${UBOOT_LOADADDRESS}>;
162 entry = <$ENTRYPOINT>;
163 hash-1 {
164 algo = "$kernel_csum";
165 };
166 };
167EOF
168
169 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$kernel_sign_keyname" ] ; then
170 sed -i '$ d' $1
171 cat << EOF >> $1
172 signature-1 {
173 algo = "$kernel_csum,$kernel_sign_algo";
174 key-name-hint = "$kernel_sign_keyname";
175 };
176 };
177EOF
178 fi
179}
180
181#
182# Emit the fitImage ITS DTB section
183#
184# $1 ... .its filename
185# $2 ... Image counter
186# $3 ... Path to DTB image
187fitimage_emit_section_dtb() {
188
189 dtb_csum="${FIT_HASH_ALG}"
190 dtb_sign_algo="${FIT_SIGN_ALG}"
191 dtb_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
192
193 dtb_loadline=""
194 dtb_ext=${DTB##*.}
195 if [ "${dtb_ext}" = "dtbo" ]; then
196 if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
197 dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
198 fi
199 elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
200 dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
201 fi
202 cat << EOF >> $1
203 fdt-$2 {
204 description = "Flattened Device Tree blob";
205 data = /incbin/("$3");
206 type = "flat_dt";
207 arch = "${UBOOT_ARCH}";
208 compression = "none";
209 $dtb_loadline
210 hash-1 {
211 algo = "$dtb_csum";
212 };
213 };
214EOF
215
216 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$dtb_sign_keyname" ] ; then
217 sed -i '$ d' $1
218 cat << EOF >> $1
219 signature-1 {
220 algo = "$dtb_csum,$dtb_sign_algo";
221 key-name-hint = "$dtb_sign_keyname";
222 };
223 };
224EOF
225 fi
226}
227
228#
229# Emit the fitImage ITS u-boot script section
230#
231# $1 ... .its filename
232# $2 ... Image counter
233# $3 ... Path to boot script image
234fitimage_emit_section_boot_script() {
235
236 bootscr_csum="${FIT_HASH_ALG}"
237 bootscr_sign_algo="${FIT_SIGN_ALG}"
238 bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
239
240 cat << EOF >> $1
241 bootscr-$2 {
242 description = "U-boot script";
243 data = /incbin/("$3");
244 type = "script";
245 arch = "${UBOOT_ARCH}";
246 compression = "none";
247 hash-1 {
248 algo = "$bootscr_csum";
249 };
250 };
251EOF
252
253 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
254 sed -i '$ d' $1
255 cat << EOF >> $1
256 signature-1 {
257 algo = "$bootscr_csum,$bootscr_sign_algo";
258 key-name-hint = "$bootscr_sign_keyname";
259 };
260 };
261EOF
262 fi
263}
264
265#
266# Emit the fitImage ITS setup section
267#
268# $1 ... .its filename
269# $2 ... Image counter
270# $3 ... Path to setup image
271fitimage_emit_section_setup() {
272
273 setup_csum="${FIT_HASH_ALG}"
274
275 cat << EOF >> $1
276 setup-$2 {
277 description = "Linux setup.bin";
278 data = /incbin/("$3");
279 type = "x86_setup";
280 arch = "${UBOOT_ARCH}";
281 os = "linux";
282 compression = "none";
283 load = <0x00090000>;
284 entry = <0x00090000>;
285 hash-1 {
286 algo = "$setup_csum";
287 };
288 };
289EOF
290}
291
292#
293# Emit the fitImage ITS ramdisk section
294#
295# $1 ... .its filename
296# $2 ... Image counter
297# $3 ... Path to ramdisk image
298fitimage_emit_section_ramdisk() {
299
300 ramdisk_csum="${FIT_HASH_ALG}"
301 ramdisk_sign_algo="${FIT_SIGN_ALG}"
302 ramdisk_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
303 ramdisk_loadline=""
304 ramdisk_entryline=""
305
306 if [ -n "${UBOOT_RD_LOADADDRESS}" ]; then
307 ramdisk_loadline="load = <${UBOOT_RD_LOADADDRESS}>;"
308 fi
309 if [ -n "${UBOOT_RD_ENTRYPOINT}" ]; then
310 ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
311 fi
312
313 cat << EOF >> $1
314 ramdisk-$2 {
315 description = "${INITRAMFS_IMAGE}";
316 data = /incbin/("$3");
317 type = "ramdisk";
318 arch = "${UBOOT_ARCH}";
319 os = "linux";
320 compression = "none";
321 $ramdisk_loadline
322 $ramdisk_entryline
323 hash-1 {
324 algo = "$ramdisk_csum";
325 };
326 };
327EOF
328
329 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$ramdisk_sign_keyname" ] ; then
330 sed -i '$ d' $1
331 cat << EOF >> $1
332 signature-1 {
333 algo = "$ramdisk_csum,$ramdisk_sign_algo";
334 key-name-hint = "$ramdisk_sign_keyname";
335 };
336 };
337EOF
338 fi
339}
340
341#
342# Emit the fitImage ITS configuration section
343#
344# $1 ... .its filename
345# $2 ... Linux kernel ID
346# $3 ... DTB image name
347# $4 ... ramdisk ID
348# $5 ... u-boot script ID
349# $6 ... config ID
350# $7 ... default flag
351fitimage_emit_section_config() {
352
353 conf_csum="${FIT_HASH_ALG}"
354 conf_sign_algo="${FIT_SIGN_ALG}"
355 conf_padding_algo="${FIT_PAD_ALG}"
356 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
357 conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
358 fi
359
360 its_file="$1"
361 kernel_id="$2"
362 dtb_image="$3"
363 ramdisk_id="$4"
364 bootscr_id="$5"
365 config_id="$6"
366 default_flag="$7"
367
368 # Test if we have any DTBs at all
369 sep=""
370 conf_desc=""
371 conf_node="${FIT_CONF_PREFIX}"
372 kernel_line=""
373 fdt_line=""
374 ramdisk_line=""
375 bootscr_line=""
376 setup_line=""
377 default_line=""
378
379	# The conf node name is selected based on the dtb ID if it is present,
380	# otherwise it's selected based on the kernel ID
381 if [ -n "$dtb_image" ]; then
382 conf_node=$conf_node$dtb_image
383 else
384 conf_node=$conf_node$kernel_id
385 fi
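	# e.g. with the default FIT_CONF_PREFIX "conf-", a dtb named "board.dtb"
	# yields the node "conf-board.dtb"; with no dtb and kernel ID 1, "conf-1"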
386
387 if [ -n "$kernel_id" ]; then
388 conf_desc="Linux kernel"
389 sep=", "
390 kernel_line="kernel = \"kernel-$kernel_id\";"
391 fi
392
393 if [ -n "$dtb_image" ]; then
394 conf_desc="$conf_desc${sep}FDT blob"
395 sep=", "
396 fdt_line="fdt = \"fdt-$dtb_image\";"
397 fi
398
399 if [ -n "$ramdisk_id" ]; then
400 conf_desc="$conf_desc${sep}ramdisk"
401 sep=", "
402 ramdisk_line="ramdisk = \"ramdisk-$ramdisk_id\";"
403 fi
404
405 if [ -n "$bootscr_id" ]; then
406 conf_desc="$conf_desc${sep}u-boot script"
407 sep=", "
408 bootscr_line="bootscr = \"bootscr-$bootscr_id\";"
409 fi
410
411 if [ -n "$config_id" ]; then
412 conf_desc="$conf_desc${sep}setup"
413 setup_line="setup = \"setup-$config_id\";"
414 fi
415
416 if [ "$default_flag" = "1" ]; then
417		# The default node is selected based on the dtb ID if it is present,
418		# otherwise it's selected based on the kernel ID
419 if [ -n "$dtb_image" ]; then
420 default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
421 else
422 default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
423 fi
424 fi
425
426 cat << EOF >> $its_file
427 $default_line
428 $conf_node {
429 description = "$default_flag $conf_desc";
430 $kernel_line
431 $fdt_line
432 $ramdisk_line
433 $bootscr_line
434 $setup_line
435 hash-1 {
436 algo = "$conf_csum";
437 };
438EOF
439
440 if [ -n "$conf_sign_keyname" ] ; then
441
442 sign_line="sign-images = "
443 sep=""
444
445 if [ -n "$kernel_id" ]; then
446 sign_line="$sign_line${sep}\"kernel\""
447 sep=", "
448 fi
449
450 if [ -n "$dtb_image" ]; then
451 sign_line="$sign_line${sep}\"fdt\""
452 sep=", "
453 fi
454
455 if [ -n "$ramdisk_id" ]; then
456 sign_line="$sign_line${sep}\"ramdisk\""
457 sep=", "
458 fi
459
460 if [ -n "$bootscr_id" ]; then
461 sign_line="$sign_line${sep}\"bootscr\""
462 sep=", "
463 fi
464
465 if [ -n "$config_id" ]; then
466 sign_line="$sign_line${sep}\"setup\""
467 fi
468
469 sign_line="$sign_line;"
470
471 cat << EOF >> $its_file
472 signature-1 {
473 algo = "$conf_csum,$conf_sign_algo";
474 key-name-hint = "$conf_sign_keyname";
475 padding = "$conf_padding_algo";
476 $sign_line
477 };
478EOF
479 fi
480
481 cat << EOF >> $its_file
482 };
483EOF
484}
485
486#
487# Assemble fitImage
488#
489# $1 ... .its filename
490# $2 ... fitImage name
491# $3 ... include ramdisk
492fitimage_assemble() {
493 kernelcount=1
494 dtbcount=""
495 DTBS=""
496 ramdiskcount=$3
497 setupcount=""
498 bootscr_id=""
499 rm -f $1 arch/${ARCH}/boot/$2
500
501 if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
502 bbfatal "Keys used to sign images and configuration nodes must be different."
503 fi
504
505 fitimage_emit_fit_header $1
506
507 #
508 # Step 1: Prepare a kernel image section.
509 #
510 fitimage_emit_section_maint $1 imagestart
511
512 uboot_prep_kimage
513 fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
514
515 #
516 # Step 2: Prepare a DTB image section
517 #
518
519 if [ -n "${KERNEL_DEVICETREE}" ]; then
520 dtbcount=1
521 for DTB in ${KERNEL_DEVICETREE}; do
522 if echo $DTB | grep -q '/dts/'; then
523			bbwarn "$DTB contains the full path to the dts file, but only the dtb name should be used."
524 DTB=`basename $DTB | sed 's,\.dts$,.dtb,g'`
525 fi
526
527 # Skip ${DTB} if it's also provided in ${EXTERNAL_KERNEL_DEVICETREE}
528 if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -s ${EXTERNAL_KERNEL_DEVICETREE}/${DTB} ]; then
529 continue
530 fi
531
532 DTB_PATH="arch/${ARCH}/boot/dts/$DTB"
533 if [ ! -e "$DTB_PATH" ]; then
534 DTB_PATH="arch/${ARCH}/boot/$DTB"
535 fi
536
537 DTB=$(echo "$DTB" | tr '/' '_')
538 DTBS="$DTBS $DTB"
539 fitimage_emit_section_dtb $1 $DTB $DTB_PATH
540 done
541 fi
542
543 if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
544 dtbcount=1
545 for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
546 DTB=$(echo "$DTB" | tr '/' '_')
547 DTBS="$DTBS $DTB"
548 fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
549 done
550 fi
551
552 #
553 # Step 3: Prepare a u-boot script section
554 #
555
556 if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
557 if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
558 cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
559 bootscr_id="${UBOOT_ENV_BINARY}"
560 fitimage_emit_section_boot_script $1 "$bootscr_id" ${UBOOT_ENV_BINARY}
561 else
562 bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
563 fi
564 fi
565
566 #
567 # Step 4: Prepare a setup section. (For x86)
568 #
569 if [ -e arch/${ARCH}/boot/setup.bin ]; then
570 setupcount=1
571 fitimage_emit_section_setup $1 $setupcount arch/${ARCH}/boot/setup.bin
572 fi
573
574 #
575 # Step 5: Prepare a ramdisk section.
576 #
577 if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
578		# Use the first initramfs image archive type we find
579 found=
580 for img in ${FIT_SUPPORTED_INITRAMFS_FSTYPES}; do
581 initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img"
582 if [ -e "$initramfs_path" ]; then
583 bbnote "Found initramfs image: $initramfs_path"
584 found=true
585 fitimage_emit_section_ramdisk $1 "$ramdiskcount" "$initramfs_path"
586 break
587 else
588 bbnote "Did not find initramfs image: $initramfs_path"
589 fi
590 done
591
592 if [ -z "$found" ]; then
593 bbfatal "Could not find a valid initramfs type for ${INITRAMFS_IMAGE_NAME}, the supported types are: ${FIT_SUPPORTED_INITRAMFS_FSTYPES}"
594 fi
595 fi
596
597 fitimage_emit_section_maint $1 sectend
598
599	# Force the first kernel and DTB into the default config
600 kernelcount=1
601 if [ -n "$dtbcount" ]; then
602 dtbcount=1
603 fi
604
605 #
606 # Step 6: Prepare a configurations section
607 #
608 fitimage_emit_section_maint $1 confstart
609
610	# kernel-fitimage.bbclass currently supports exactly one kernel (no more,
611	# no less) to be added to the FIT image, along with 0 or more device trees
612	# and 0 or 1 ramdisk.
613	# It is also possible to include an initramfs bundle (kernel and rootfs in one binary).
614	# When the initramfs bundle is used, the ramdisk is disabled.
615	# If a device tree is to be part of the FIT image, then the default
616	# configuration is selected based on the dtbcount. If there is no dtb
617	# present, then the default configuration is selected based on
618	# the kernelcount.
619 if [ -n "$DTBS" ]; then
620 i=1
621 for DTB in ${DTBS}; do
622 dtb_ext=${DTB##*.}
623 if [ "$dtb_ext" = "dtbo" ]; then
624 fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`"
625 else
626 fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`"
627 fi
628 i=`expr $i + 1`
629 done
630 else
631 defaultconfigcount=1
632 fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount
633 fi
634
635 fitimage_emit_section_maint $1 sectend
636
637 fitimage_emit_section_maint $1 fitend
638
639 #
640 # Step 7: Assemble the image
641 #
642 ${UBOOT_MKIMAGE} \
643 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
644 -f $1 \
645 arch/${ARCH}/boot/$2
646
647 #
648 # Step 8: Sign the image and add public key to U-Boot dtb
649 #
650 if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
651 add_key_to_u_boot=""
652 if [ -n "${UBOOT_DTB_BINARY}" ]; then
653			# The u-boot.dtb is a symlink to UBOOT_DTB_IMAGE, so we need to copy
654			# both of them without dereferencing the symlink.
655 cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
656 add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
657 fi
658 ${UBOOT_MKIMAGE_SIGN} \
659 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
660 -F -k "${UBOOT_SIGN_KEYDIR}" \
661 $add_key_to_u_boot \
662 -r arch/${ARCH}/boot/$2 \
663 ${UBOOT_MKIMAGE_SIGN_ARGS}
664 fi
665}
666
667do_assemble_fitimage() {
668 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
669 cd ${B}
670 fitimage_assemble fit-image.its fitImage ""
671 fi
672}
673
674addtask assemble_fitimage before do_install after do_compile
675
676do_assemble_fitimage_initramfs() {
677 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
678 test -n "${INITRAMFS_IMAGE}" ; then
679 cd ${B}
680 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
681 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
682 else
683 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
684 fi
685 fi
686}
687
688addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
689
690do_kernel_generate_rsa_keys() {
691 if [ "${UBOOT_SIGN_ENABLE}" = "0" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
692 bbwarn "FIT_GENERATE_KEYS is set to 1 even though UBOOT_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
693 fi
694
695 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
696
697 # Generate keys to sign configuration nodes, only if they don't already exist
698 if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
699 [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then
700
701 # make directory if it does not already exist
702 mkdir -p "${UBOOT_SIGN_KEYDIR}"
703
704 bbnote "Generating RSA private key for signing fitImage"
705 openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
706 "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
707 "${FIT_SIGN_NUMBITS}"
708
709 bbnote "Generating certificate for signing fitImage"
710 openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
711 -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
712 -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
713 fi
714
715 # Generate keys to sign image nodes, only if they don't already exist
716 if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key ] || \
717 [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt ]; then
718
719 # make directory if it does not already exist
720 mkdir -p "${UBOOT_SIGN_KEYDIR}"
721
722 bbnote "Generating RSA private key for signing fitImage"
723 openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
724 "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
725 "${FIT_SIGN_NUMBITS}"
726
727 bbnote "Generating certificate for signing fitImage"
728 openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
729 -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
730 -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt
731 fi
732 fi
733}
734
735addtask kernel_generate_rsa_keys before do_assemble_fitimage after do_compile
736
737kernel_do_deploy[vardepsexclude] = "DATETIME"
738kernel_do_deploy:append() {
739 # Update deploy directory
740 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
741
742 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
743 bbnote "Copying fit-image.its source file..."
744 install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
745 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
746 ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
747 fi
748
749 bbnote "Copying linux.bin file..."
750 install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}
751 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
752 ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
753 fi
754 fi
755
756 if [ -n "${INITRAMFS_IMAGE}" ]; then
757 bbnote "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
758 install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
759 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
760 ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
761 fi
762
763 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
764 bbnote "Copying fitImage-${INITRAMFS_IMAGE} file..."
765 install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
766 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
767 ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
768 fi
769 fi
770 fi
771 fi
772 if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
773 [ -n "${UBOOT_DTB_BINARY}" ] ; then
774		# UBOOT_DTB_IMAGE is a real file, but we can't use
775		# ${UBOOT_DTB_IMAGE} since it contains ${PV}, which is meant
776		# for u-boot, while we are in the kernel environment here.
777 install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
778 fi
779 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${UBOOT_BINARY}" -a -n "${SPL_DTB_BINARY}" ] ; then
780		# If we're also creating and/or signing the u-boot fitImage, we now
781		# need to deploy it and its .its file, as well as u-boot-spl.dtb
782 install -m 0644 ${B}/u-boot-spl-${MACHINE}*.dtb "$deployDir/"
783 bbnote "Copying u-boot-fitImage file..."
784 install -m 0644 ${B}/u-boot-fitImage-* "$deployDir/"
785 bbnote "Copying u-boot-its file..."
786 install -m 0644 ${B}/u-boot-its-* "$deployDir/"
787 fi
788}
789
790# The function below performs the following in case of initramfs bundles:
791# - Removes do_assemble_fitimage. FIT generation is done through
792# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
793# and should not be part of the tasks to be executed.
794# - Since do_kernel_generate_rsa_keys is inserted by default
795# between do_compile and do_assemble_fitimage, this is
796# not suitable in case of initramfs bundles. do_kernel_generate_rsa_keys
797# should be between do_bundle_initramfs and do_assemble_fitimage_initramfs.
798python () {
799 if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
800 bb.build.deltask('do_assemble_fitimage', d)
801 bb.build.deltask('kernel_generate_rsa_keys', d)
802 bb.build.addtask('kernel_generate_rsa_keys', 'do_assemble_fitimage_initramfs', 'do_bundle_initramfs', d)
803}
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
deleted file mode 100644
index 2325e635e1..0000000000
--- a/meta/classes/kernel-grub.bbclass
+++ /dev/null
@@ -1,111 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# While installing an rpm to update the kernel on a deployed target, it updates
9# the boot area and the boot menu with the new kernel as the priority, but
10# allows you to fall back to the original kernel as well.
11#
12# - In kernel-image's preinstall scriptlet, it backs up the original kernel to
13# avoid a probable conflict with the new one.
14#
15# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
16# set the new kernel as the boot priority.
17#
18
19python __anonymous () {
20 import re
21
22 preinst = '''
23	# Handle naming conflicts
24 [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
25 [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
26 if [ -n "$grubcfg" ]; then
27		# Dereference symlink to avoid a conflict with the new kernel name.
28 if grep -q "/KERNEL_IMAGETYPE \+root=" $grubcfg; then
29 if [ -L "$D/boot/KERNEL_IMAGETYPE" ]; then
30 kimage=`realpath $D/boot/KERNEL_IMAGETYPE 2>/dev/null`
31 if [ -f "$D$kimage" ]; then
32 sed -i "s:KERNEL_IMAGETYPE \+root=:${kimage##*/} root=:" $grubcfg
33 fi
34 fi
35 fi
36
37 # Rename old kernel if it conflicts with new kernel name.
38 if grep -q "/KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=" $grubcfg; then
39 if [ -f "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" ]; then
40 timestamp=`date +%s`
41 kimage="$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}-$timestamp-back"
42 sed -i "s:KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
43 mv "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" "$kimage"
44 fi
45 fi
46 fi
47'''
48
49 postinst = '''
50 get_new_grub_cfg() {
51 grubcfg="$1"
52 old_image="$2"
53 title="Update KERNEL_IMAGETYPE-${KERNEL_VERSION}-${PV}"
54 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
55 rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
56 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
57
58 echo "menuentry \"$title\" {"
59 echo " set root=(hd0,1)"
60 echo "$rootfs"
61 echo "}"
62 elif [ "${grubcfg##*/}" = "menu.list" ]; then
63 rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
64 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
65
66 echo "default 0"
67 echo "timeout 30"
68 echo "title $title"
69 echo "root (hd0,0)"
70 echo "$rootfs"
71 fi
72 }
73
74 get_old_grub_cfg() {
75 grubcfg="$1"
76 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
77 cat "$grubcfg"
78 elif [ "${grubcfg##*/}" = "menu.list" ]; then
79 sed -e '/^default/d' -e '/^timeout/d' "$grubcfg"
80 fi
81 }
82
83 if [ -f "$D/boot/grub/grub.cfg" ]; then
84 grubcfg="$D/boot/grub/grub.cfg"
85 old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
86 elif [ -f "$D/boot/grub/menu.list" ]; then
87 grubcfg="$D/boot/grub/menu.list"
88 old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
89 fi
90
91 # Don't update grubcfg at first install while old bzImage doesn't exist.
92 if [ -f "$D/boot/${old_image##*/}" ]; then
93 grubcfgtmp="$grubcfg.tmp"
94 get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
95 get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
96 mv $grubcfgtmp $grubcfg
97		echo "Caution! Updating the kernel may affect kernel modules!"
98 fi
99'''
100
101 imagetypes = d.getVar('KERNEL_IMAGETYPES')
102 imagetypes = re.sub(r'\.gz$', '', imagetypes)
103
104 for type in imagetypes.split():
105 typelower = type.lower()
106 preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
107 postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
108 d.setVar('pkg_preinst:kernel-image-' + typelower + ':append', preinst_append)
109 d.setVar('pkg_postinst:kernel-image-' + typelower + ':prepend', postinst_prepend)
110}
111
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
deleted file mode 100644
index 1b4c864a63..0000000000
--- a/meta/classes/kernel-module-split.bbclass
+++ /dev/null
@@ -1,197 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7pkg_postinst:modules () {
8if [ -z "$D" ]; then
9 depmod -a ${KERNEL_VERSION}
10else
11 # image.bbclass will call depmodwrapper after everything is installed,
12 # no need to do it here as well
13 :
14fi
15}
16
17pkg_postrm:modules () {
18if [ -z "$D" ]; then
19 depmod -a ${KERNEL_VERSION}
20else
21 depmodwrapper -a -b $D ${KERNEL_VERSION}
22fi
23}
24
25autoload_postinst_fragment() {
26if [ x"$D" = "x" ]; then
27 modprobe %s || true
28fi
29}
30
31PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
32
33do_install:append() {
34 install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
35}
36
37KERNEL_SPLIT_MODULES ?= "1"
38PACKAGESPLITFUNCS:prepend = "split_kernel_module_packages "
39
40KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
41
42KERNEL_MODULE_PACKAGE_PREFIX ?= ""
43KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
44KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1"
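# Illustrative usage (module names hypothetical): autoload one module at boot
# and ship a modprobe.d fragment for another:
#   KERNEL_MODULE_AUTOLOAD += "8021q"
#   KERNEL_MODULE_PROBECONF += "bonding"
#   module_conf_bonding = "options bonding max_bonds=2"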
45
46python split_kernel_module_packages () {
47 import re
48
49 modinfoexp = re.compile("([^=]+)=(.*)")
50
51 def extract_modinfo(file):
52 import tempfile, subprocess
53 tempfile.tempdir = d.getVar("WORKDIR")
54 compressed = re.match( r'.*\.(gz|xz|zst)$', file)
55 tf = tempfile.mkstemp()
56 tmpfile = tf[1]
57 if compressed:
58 tmpkofile = tmpfile + ".ko"
59 if compressed.group(1) == 'gz':
60 cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
61 subprocess.check_call(cmd, shell=True)
62 elif compressed.group(1) == 'xz':
63 cmd = "xz -dc %s > %s" % (file, tmpkofile)
64 subprocess.check_call(cmd, shell=True)
65 elif compressed.group(1) == 'zst':
66 cmd = "zstd -dc %s > %s" % (file, tmpkofile)
67 subprocess.check_call(cmd, shell=True)
68 else:
69 msg = "Cannot decompress '%s'" % file
70                raise ValueError(msg)
71 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
72 else:
73 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
74 subprocess.check_call(cmd, shell=True)
75 # errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
76 f = open(tmpfile, errors='replace')
77 l = f.read().split("\000")
78 f.close()
79 os.close(tf[0])
80 os.unlink(tmpfile)
81 if compressed:
82 os.unlink(tmpkofile)
83 vals = {}
84 for i in l:
85 m = modinfoexp.match(i)
86 if not m:
87 continue
88 vals[m.group(1)] = m.group(2)
89 return vals
90
91 def frob_metadata(file, pkg, pattern, format, basename):
92 vals = extract_modinfo(file)
93
94 dvar = d.getVar('PKGD')
95
96 # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
97 # appropriate modprobe commands to the postinst
98 autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
99 autoload = d.getVar('module_autoload_%s' % basename)
100 if autoload and autoload == basename:
101 bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
102 if autoload and basename not in autoloadlist:
103 bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
104 if basename in autoloadlist:
105 name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
106 f = open(name, 'w')
107 if autoload:
108 for m in autoload.split():
109 f.write('%s\n' % m)
110 else:
111 f.write('%s\n' % basename)
112 f.close()
113 postinst = d.getVar('pkg_postinst:%s' % pkg)
114 if not postinst:
115 bb.fatal("pkg_postinst:%s not defined" % pkg)
116 postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
117 d.setVar('pkg_postinst:%s' % pkg, postinst)
118
119 # Write out any modconf fragment
120 modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
121 modconf = d.getVar('module_conf_%s' % basename)
122 if modconf and basename in modconflist:
123 name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
124 f = open(name, 'w')
125 f.write("%s\n" % modconf)
126 f.close()
127 elif modconf:
128 bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
129
130 files = d.getVar('FILES:%s' % pkg)
131 files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
132 d.setVar('FILES:%s' % pkg, files)
133
134 conffiles = d.getVar('CONFFILES:%s' % pkg)
135 conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
136 d.setVar('CONFFILES:%s' % pkg, conffiles)
137
138 if "description" in vals:
139 old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
140 d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"])
141
142 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
143 modinfo_deps = []
144 if "depends" in vals and vals["depends"] != "":
145 for dep in vals["depends"].split(","):
146 on = legitimize_package_name(dep)
147 dependency_pkg = format % on
148 modinfo_deps.append(dependency_pkg)
149 for dep in modinfo_deps:
150 if not dep in rdepends:
151 rdepends[dep] = []
152 d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
153
154 # Avoid automatic -dev recommendations for modules ending with -dev.
155 d.setVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs', 1)
156
157 # Provide virtual package without postfix
158 providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
159 if providevirt == "1":
160 postfix = format.split('%s')[1]
161 d.setVar('RPROVIDES:' + pkg, pkg.replace(postfix, ''))
162
163 kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
164 kernel_version = d.getVar("KERNEL_VERSION")
165
166 metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
167 splitmods = d.getVar('KERNEL_SPLIT_MODULES')
168 postinst = d.getVar('pkg_postinst:modules')
169 postrm = d.getVar('pkg_postrm:modules')
170
171 if splitmods != '1':
172 etcdir = d.getVar('sysconfdir')
173 d.appendVar('FILES:' + metapkg, '%s/modules-load.d/ %s/modprobe.d/ %s/modules/' % (etcdir, etcdir, d.getVar("nonarch_base_libdir")))
174 d.appendVar('pkg_postinst:%s' % metapkg, postinst)
175        d.prependVar('pkg_postrm:%s' % metapkg, postrm)
176 return
177
178 module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$'
179
180 module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
181 module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
182 module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
183
184 modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
185 if modules:
186 d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
187
188    # If modules-load.d and modprobe.d are empty at this point, remove them to
189    # avoid warnings. The listdir check ensures os.rmdir is only called on
190    # directories that are actually empty.
191 dvar = d.getVar('PKGD')
192 for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
193 if len(os.listdir(dir)) == 0:
194 os.rmdir(dir)
195}
196
197do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
deleted file mode 100644
index 4aab02671e..0000000000
--- a/meta/classes/kernel-uboot.bbclass
+++ /dev/null
@@ -1,49 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# fitImage kernel compression algorithm
8FIT_KERNEL_COMP_ALG ?= "gzip"
9FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
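#
# For example (illustrative), to switch the fitImage kernel compression to lzo:
#   FIT_KERNEL_COMP_ALG = "lzo"
#   FIT_KERNEL_COMP_ALG_EXTENSION = ".lzo"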
10
11# Kernel image type passed to mkimage (e.g. kernel, kernel_noload, ...)
12UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel"
13
14uboot_prep_kimage() {
15 if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
16 vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
17 linux_suffix=""
18 linux_comp="none"
19 elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
20 rm -f linux.bin
21 cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
22 vmlinux_path=""
23 linux_suffix=""
24 linux_comp="none"
25 else
26 vmlinux_path="vmlinux"
27		# Use vmlinux.initramfs for linux.bin when INITRAMFS_IMAGE_BUNDLE is set,
28		# as per the implementation in kernel.bbclass
29		# (see the do_bundle_initramfs function).
30 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ] && [ -e vmlinux.initramfs ]; then
31 vmlinux_path="vmlinux.initramfs"
32 fi
33 linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}"
34 linux_comp="${FIT_KERNEL_COMP_ALG}"
35 fi
36
37 [ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
38
39 if [ "${linux_comp}" != "none" ] ; then
40 if [ "${linux_comp}" = "gzip" ] ; then
41 gzip -9 linux.bin
42 elif [ "${linux_comp}" = "lzo" ] ; then
43 lzop -9 linux.bin
44 fi
45 mv -f "linux.bin${linux_suffix}" linux.bin
46 fi
47
48 echo "${linux_comp}"
49}
diff --git a/meta/classes/kernel-uimage.bbclass b/meta/classes/kernel-uimage.bbclass
deleted file mode 100644
index 1a599e656c..0000000000
--- a/meta/classes/kernel-uimage.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-uboot
8
9python __anonymous () {
10 if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
11 depends = d.getVar("DEPENDS")
12 depends = "%s u-boot-tools-native" % depends
13 d.setVar("DEPENDS", depends)
14
15 # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
16    # to kernel.bbclass. We override the variable here, since we need
17 # to build uImage using the kernel build system if and only if
18 # KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
19    # the uImage.
20 if d.getVar("KEEPUIMAGE") != 'yes':
21 typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
22 if "uImage" in typeformake.split():
23 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))
24
25 # Enable building of uImage with mkimage
26 bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d)
27}
28
29do_uboot_mkimage[dirs] += "${B}"
30do_uboot_mkimage() {
31 uboot_prep_kimage
32
33 ENTRYPOINT=${UBOOT_ENTRYPOINT}
34 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
35 ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \
36 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
37 fi
38
39 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
40 rm -f linux.bin
41}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
deleted file mode 100644
index 8eda0dcaf3..0000000000
--- a/meta/classes/kernel-yocto.bbclass
+++ /dev/null
@@ -1,732 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# remove tasks that modify the source tree in case externalsrc is inherited
8SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
9PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
10PATCH_GIT_USER_NAME ?= "OpenEmbedded"
11
12# The distro or local.conf should set this, but if nobody cares...
13LINUX_KERNEL_TYPE ??= "standard"
14
15# KMETA ?= ""
16KBRANCH ?= "master"
17KMACHINE ?= "${MACHINE}"
18SRCREV_FORMAT ?= "meta_machine"
19
20# LEVELS:
21# 0: no reporting
22# 1: report options that are specified, but not in the final config
23# 2: report options that are not hardware related, but set by a BSP
24KCONF_AUDIT_LEVEL ?= "1"
25KCONF_BSP_AUDIT_LEVEL ?= "0"
26KMETA_AUDIT ?= "yes"
27KMETA_AUDIT_WERROR ?= ""
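#
# For example (illustrative local.conf settings), to audit BSP-set options
# more strictly and promote audit warnings to errors:
#   KCONF_BSP_AUDIT_LEVEL = "2"
#   KMETA_AUDIT_WERROR = "1"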
28
29# returns local (absolute) path names for all valid patches in the
30# src_uri
31def find_patches(d,subdir):
32 patches = src_patches(d)
33 patch_list=[]
34 for p in patches:
35 _, _, local, _, _, parm = bb.fetch.decodeurl(p)
36 # if patchdir has been passed, we won't be able to apply it so skip
37 # the patch for now, and special processing happens later
38 patchdir = ''
39 if "patchdir" in parm:
40 patchdir = parm["patchdir"]
41 if subdir:
42 if subdir == patchdir:
43 patch_list.append(local)
44 else:
45 # skip the patch if a patchdir was supplied, it won't be handled
46 # properly
47 if not patchdir:
48 patch_list.append(local)
49
50 return patch_list
51
52# returns all the elements from the src uri that are .scc files
53def find_sccs(d):
54 sources=src_patches(d, True)
55 sources_list=[]
56 for s in sources:
57 base, ext = os.path.splitext(os.path.basename(s))
58 if ext and ext in [".scc", ".cfg"]:
59 sources_list.append(s)
60 elif base and 'defconfig' in base:
61 sources_list.append(s)
62
63 return sources_list
64
65# Check the SRC_URI for "kmeta"-typed git repositories. Return the name of
66# the repository as it will be found in WORKDIR
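# For example (illustrative entry), kernel metadata is typically fetched as:
#   SRC_URI += "git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;destsuffix=kernel-meta;branch=master"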
67def find_kernel_feature_dirs(d):
68 feature_dirs=[]
69 fetch = bb.fetch2.Fetch([], d)
70 for url in fetch.urls:
71 urldata = fetch.ud[url]
72 parm = urldata.parm
73 type=""
74 if "type" in parm:
75 type = parm["type"]
76 if "destsuffix" in parm:
77 destdir = parm["destsuffix"]
78 if type == "kmeta":
79 feature_dirs.append(destdir)
80
81 return feature_dirs
82
83# find the master/machine source branch. In the same way that the fetcher processes
84# git repositories in the SRC_URI, we take the first repo found, first branch.
85def get_machine_branch(d, default):
86 fetch = bb.fetch2.Fetch([], d)
87 for url in fetch.urls:
88 urldata = fetch.ud[url]
89 parm = urldata.parm
90 if "branch" in parm:
91 branches = urldata.parm.get("branch").split(',')
92 btype = urldata.parm.get("type")
93 if btype != "kmeta":
94 return branches[0]
95
96 return default
97
98# returns a list of all directories that are on FILESEXTRAPATHS (and
99# hence available to the build) that contain .scc or .cfg files
100def get_dirs_with_fragments(d):
101 extrapaths = []
102 extrafiles = []
103 extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "")
104 # Remove default flag which was used for checking
105 extrapathsvalue = extrapathsvalue.replace("__default:", "")
106 extrapaths = extrapathsvalue.split(":")
107 for path in extrapaths:
108 if path + ":True" not in extrafiles:
109 extrafiles.append(path + ":" + str(os.path.exists(path)))
110
111 return " ".join(extrafiles)
112
113do_kernel_metadata() {
114 set +e
115
116 if [ -n "$1" ]; then
117 mode="$1"
118 else
119 mode="patch"
120 fi
121
122 cd ${S}
123 export KMETA=${KMETA}
124
125 bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0"
126
127	# if kernel tools are available in-tree, they are preferred
128	# and are placed on the path before any external tools, unless
129	# the external tools flag is set, in which case we do nothing.
130 if [ -f "${S}/scripts/util/configme" ]; then
131 if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
132 PATH=${S}/scripts/util:${PATH}
133 fi
134 fi
135
136 # In a similar manner to the kernel itself:
137 #
138 # defconfig: $(obj)/conf
139 # ifeq ($(KBUILD_DEFCONFIG),)
140 # $< --defconfig $(Kconfig)
141 # else
142 # @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
143 # $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
144 # endif
145 #
146 # If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it
147	# from the source tree into a common location, under the normalized name "defconfig",
148	# where the rest of the process will include and incorporate it into the build.
149 #
150 # If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
151 # we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
152	# precedence.
153 #
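	# For example (illustrative), a recipe can select an in-tree defconfig with:
	#   KBUILD_DEFCONFIG = "multi_v7_defconfig"
	#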
154 if [ -n "${KBUILD_DEFCONFIG}" ]; then
155 if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
156 if [ -f "${WORKDIR}/defconfig" ]; then
157			# If the two defconfigs are different, warn that we overwrote the
158 # one already placed in WORKDIR
159 cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
160 if [ $? -ne 0 ]; then
161 bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
162 fi
163 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
164 else
165 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
166 fi
167 in_tree_defconfig="${WORKDIR}/defconfig"
168 else
169 bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
170 fi
171 fi
172
173 if [ "$mode" = "patch" ]; then
174		# Was anyone trying to patch the kernel metadata? We need to do
175 # this here, since the scc commands migrate the .cfg fragments to the
176 # kernel source tree, where they'll be used later.
177 check_git_config
178 patches="${@" ".join(find_patches(d,'kernel-meta'))}"
179 for p in $patches; do
180 (
181 cd ${WORKDIR}/kernel-meta
182 git am -s $p
183 )
184 done
185 fi
186
187 sccs_from_src_uri="${@" ".join(find_sccs(d))}"
188 patches="${@" ".join(find_patches(d,''))}"
189 feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
190
191	# a quick check to make sure we don't have duplicate defconfigs. If
192	# there's a defconfig in the SRC_URI, did we also have one from the
193	# KBUILD_DEFCONFIG processing above?
194 src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ')
195	# drop any defconfigs from the src_uri variable; we captured one just above if it existed
196 sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ')
197
198 if [ -n "$in_tree_defconfig" ]; then
199 sccs_defconfig=$in_tree_defconfig
200 if [ -n "$src_uri_defconfig" ]; then
201 bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI entry $src_uri_defconfig"
202 fi
203 else
204 # if we didn't have an in-tree one, make our defconfig the one
205 # from the src_uri. Note: there may not have been one from the
206 # src_uri, so this can be an empty variable.
207 sccs_defconfig=$src_uri_defconfig
208 fi
209 sccs="$sccs_from_src_uri"
210
211 # check for feature directories/repos/branches that were part of the
212 # SRC_URI. If they were supplied, we convert them into include directives
213 # for the update part of the process
214 for f in ${feat_dirs}; do
215 if [ -d "${WORKDIR}/$f/meta" ]; then
216 includes="$includes -I${WORKDIR}/$f/kernel-meta"
217 elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
218 includes="$includes -I${WORKDIR}/../oe-local-files/$f"
219 elif [ -d "${WORKDIR}/$f" ]; then
220 includes="$includes -I${WORKDIR}/$f"
221 fi
222 done
223 for s in ${sccs} ${patches}; do
224 sdir=$(dirname $s)
225 includes="$includes -I${sdir}"
226		# if a patch or .scc passed via SRC_URI has a subdir of "kernel-meta",
227 # then we add it to the search path
228 if [ -d "${sdir}/kernel-meta" ]; then
229 includes="$includes -I${sdir}/kernel-meta"
230 fi
231 done
232
233 # expand kernel features into their full path equivalents
234 bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
235 if [ -z "$bsp_definition" ]; then
236 if [ -z "$sccs_defconfig" ]; then
237 bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
238 fi
239 else
240 # if the bsp definition has "define KMETA_EXTERNAL_BSP t",
241 # then we need to set a flag that will instruct the next
242 # steps to use the BSP as both configuration and patches.
243 grep -q KMETA_EXTERNAL_BSP $bsp_definition
244 if [ $? -eq 0 ]; then
245 KMETA_EXTERNAL_BSPS="t"
246 fi
247 fi
248 meta_dir=$(kgit --meta)
249
250 KERNEL_FEATURES_FINAL=""
251 if [ -n "${KERNEL_FEATURES}" ]; then
252 for feature in ${KERNEL_FEATURES}; do
253 feature_found=f
254 for d in $includes; do
255 path_to_check=$(echo $d | sed 's/^-I//')
256 if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then
257 feature_found=t
258 fi
259 done
260 if [ "$feature_found" = "f" ]; then
261 if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then
262 bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set"
263 bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue"
264 else
265 bberror "Feature '$feature' not found, this will cause configuration failures."
266 bberror "Check the SRC_URI for meta-data repositories or directories that may be missing"
267 bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
268 fi
269 else
270 KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature"
271 fi
272 done
273 fi
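	# e.g. a recipe or bbappend commonly requests features as (illustrative):
	#   KERNEL_FEATURES:append = " features/netfilter/netfilter.scc"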
274
275 if [ "$mode" = "config" ]; then
276 # run1: pull all the configuration fragments, no matter where they come from
277 elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
278 if [ -n "${elements}" ]; then
279 echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
280 scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
281 if [ $? -ne 0 ]; then
282 bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
283 fi
284 fi
285 fi
286
287 # if KMETA_EXTERNAL_BSPS has been set, or it has been detected from
288 # the bsp definition, then we inject the bsp_definition into the
289	# patch phase below. We'll piggyback on the sccs variable.
290 if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
291 sccs="${bsp_definition} ${sccs}"
292 fi
293
294 if [ "$mode" = "patch" ]; then
295 # run2: only generate patches for elements that have been passed on the SRC_URI
296 elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
297 if [ -n "${elements}" ]; then
298 scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL
299 if [ $? -ne 0 ]; then
300 bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
301 fi
302 fi
303 fi
304
305 if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then
306 bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):"
307 bbnote "======================================================================"
308 if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
309 bbnote "Non kernel-cache (external) bsp"
310 fi
311 bbnote "BSP entry point / definition: $bsp_definition"
312 if [ -n "$in_tree_defconfig" ]; then
313 bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}"
314 fi
315 bbnote "Fragments from SRC_URI: $sccs_from_src_uri"
316 bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
317 bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
318 fi
319
320 set -e
321}
322
323do_patch() {
324 set +e
325 cd ${S}
326
327 check_git_config
328 meta_dir=$(kgit --meta)
329 (cd ${meta_dir}; ln -sf patch.queue series)
330 if [ -f "${meta_dir}/series" ]; then
331 kgit_extra_args=""
332 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
333 kgit_extra_args="--commit-sha author"
334 fi
335 kgit-s2q --gen -v $kgit_extra_args --patches .kernel-meta/
336 if [ $? -ne 0 ]; then
337 bberror "Could not apply patches for ${KMACHINE}."
338 bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
339 fi
340 fi
341
342 if [ -f "${meta_dir}/merge.queue" ]; then
343 # we need to merge all these branches
344 for b in $(cat ${meta_dir}/merge.queue); do
345 git show-ref --verify --quiet refs/heads/${b}
346 if [ $? -eq 0 ]; then
347 bbnote "Merging branch ${b}"
348 git merge -q --no-ff -m "Merge branch ${b}" ${b}
349 else
350 bbfatal "branch ${b} does not exist, cannot merge"
351 fi
352 done
353 fi
354
355 set -e
356}
357
358do_kernel_checkout() {
359 set +e
360
361 source_dir=`echo ${S} | sed 's%/$%%'`
362 source_workdir="${WORKDIR}/git"
363 if [ -d "${WORKDIR}/git/" ]; then
364 # case: git repository
365 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
366 if [ "${source_dir}" != "${source_workdir}" ]; then
367 if [ -d "${source_workdir}/.git" ]; then
368 # regular git repository with .git
369 rm -rf ${S}
370 mv ${WORKDIR}/git ${S}
371 else
372 # create source for bare cloned git repository
373 git clone ${WORKDIR}/git ${S}
374 rm -rf ${WORKDIR}/git
375 fi
376 fi
377 cd ${S}
378
379 # convert any remote branches to local tracking ones
380 for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
381 b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
382 git show-ref --quiet --verify -- "refs/heads/$b"
383 if [ $? -ne 0 ]; then
384 git branch $b $i > /dev/null
385 fi
386 done
387
388 # Create a working tree copy of the kernel by checking out a branch
389 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
390
391 # checkout and clobber any unimportant files
392 git checkout -f ${machine_branch}
393 else
394 # case: we have no git repository at all.
395 # To support low bandwidth options for building the kernel, we'll just
396 # convert the tree to a git repo and let the rest of the process work unchanged
397
398		# if ${S} hasn't been set to the proper subdirectory, a default of "linux" is
399 # used, but we can't initialize that empty directory. So check it and throw a
400 # clear error
401
402 cd ${S}
403 if [ ! -f "Makefile" ]; then
404 bberror "S is not set to the linux source directory. Check "
405 bbfatal "the recipe and set S to the proper extracted subdirectory"
406 fi
407 rm -f .gitignore
408 git init
409 check_git_config
410 git add .
411 git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
412 git clean -d -f
413 fi
414
415 set -e
416}
417do_kernel_checkout[dirs] = "${S} ${WORKDIR}"
418
419addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
420addtask kernel_metadata after do_validate_branches do_unpack before do_patch
421do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
422do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
423do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
424
425do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
426do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
427do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
428do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
429do_kernel_configme[dirs] += "${S} ${B}"
430do_kernel_configme() {
431 do_kernel_metadata config
432
433 # translate the kconfig_mode into something that merge_config.sh
434 # understands
435 case ${KCONFIG_MODE} in
436 *allnoconfig)
437 config_flags="-n"
438 ;;
439 *alldefconfig)
440 config_flags=""
441 ;;
442 *)
443 if [ -f ${WORKDIR}/defconfig ]; then
444 config_flags="-n"
445 fi
446 ;;
447 esac
448
449 cd ${S}
450
451 meta_dir=$(kgit --meta)
452 configs="$(scc --configs -o ${meta_dir})"
453 if [ $? -ne 0 ]; then
454 bberror "${configs}"
455 bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
456 fi
457
458 CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
459 if [ $? -ne 0 -o ! -f ${B}/.config ]; then
460 bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
461 if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
462 bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`"
463 else
464 bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log"
465 fi
466 fi
467
468 if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then
469 echo "# Global settings from linux recipe" >> ${B}/.config
470 echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
471 fi
472}
473
474addtask kernel_configme before do_configure after do_patch
475addtask config_analysis
476
477do_config_analysis[depends] = "virtual/kernel:do_configure"
478do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot"
479
480CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt"
481CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt"
482
483python do_config_analysis() {
484 import re, string, sys, subprocess
485
486 s = d.getVar('S')
487
488 env = os.environ.copy()
489 env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
490 env['LD'] = d.getVar('KERNEL_LD')
491 env['CC'] = d.getVar('KERNEL_CC')
492 env['ARCH'] = d.getVar('ARCH')
493 env['srctree'] = s
494
495 # read specific symbols from the kernel recipe or from local.conf
496 # i.e.: CONFIG_ANALYSIS:pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
497 config = d.getVar( 'CONFIG_ANALYSIS' )
498 if not config:
499 config = [ "" ]
500 else:
501 config = config.split()
502
503 for c in config:
504 for action in ["analysis","audit"]:
505 if action == "analysis":
506 try:
507 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
508 except subprocess.CalledProcessError as e:
509 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
510
511 outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
512
513 if action == "audit":
514 try:
515 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
516 except subprocess.CalledProcessError as e:
517 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
518
519 outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
520
521 if c:
522 outdir = os.path.dirname( outfile )
523 outname = os.path.basename( outfile )
524 outfile = outdir + '/'+ c + '-' + outname
525
526 if config and os.path.isfile(outfile):
527 os.remove(outfile)
528
529 with open(outfile, 'w+') as f:
530 f.write( analysis )
531
532 bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile ))
533 if c:
534 bb.warn( analysis )
535}
536
537python do_kernel_configcheck() {
538 import re, string, sys, subprocess
539
540 s = d.getVar('S')
541
542 # if KMETA isn't set globally by a recipe using this routine, use kgit to
543 # locate or create the meta directory. Otherwise, kconf_check is not
544 # passed a valid meta-series for processing
545 kmeta = d.getVar("KMETA")
546 if not kmeta or not os.path.exists('{}/{}'.format(s,kmeta)):
547 kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip()
548
549 env = os.environ.copy()
550 env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
551 env['LD'] = d.getVar('KERNEL_LD')
552 env['CC'] = d.getVar('KERNEL_CC')
553 env['ARCH'] = d.getVar('ARCH')
554 env['srctree'] = s
555
556 try:
557 configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
558 except subprocess.CalledProcessError as e:
559 bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
560
561 config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
562 bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
563 kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or ""
564 warnings_detected = False
565
566 # if config check visibility is "1", that's the lowest level of audit. So
567 # we add the --classify option to the run, since classification will
568 # streamline the output to only report options that could be boot issues,
569 # or are otherwise required for proper operation.
570 extra_params = ""
571 if config_check_visibility == 1:
572 extra_params = "--classify"
573
574 # category #1: mismatches
575 try:
576 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
577 except subprocess.CalledProcessError as e:
578 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
579
580 if analysis:
581 outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
582 if os.path.isfile(outfile):
583 os.remove(outfile)
584 with open(outfile, 'w+') as f:
585 f.write( analysis )
586
587 if config_check_visibility and os.stat(outfile).st_size > 0:
588 with open (outfile, "r") as myfile:
589 results = myfile.read()
590 bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
591 warnings_detected = True
592
593 # category #2: invalid fragment elements
594 extra_params = ""
595 if bsp_check_visibility > 1:
596 extra_params = "--strict"
597 try:
598 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
599 except subprocess.CalledProcessError as e:
600 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
601
602 if analysis:
603 outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
604 if os.path.isfile(outfile):
605 os.remove(outfile)
606 with open(outfile, 'w+') as f:
607 f.write( analysis )
608
609 if bsp_check_visibility and os.stat(outfile).st_size > 0:
610 with open (outfile, "r") as myfile:
611 results = myfile.read()
612 bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results)
613 warnings_detected = True
614
615 # category #3: redefined options (this is pretty verbose and is debug only)
616 try:
617 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
618 except subprocess.CalledProcessError as e:
619 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
620
621 if analysis:
622 outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
623 if os.path.isfile(outfile):
624 os.remove(outfile)
625 with open(outfile, 'w+') as f:
626 f.write( analysis )
627
628 # if the audit level is greater than two, we report if a fragment has overridden
629 # a value from a base fragment. This is really only used for new kernel introduction
630 if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0:
631 with open (outfile, "r") as myfile:
632 results = myfile.read()
633 bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
634 warnings_detected = True
635
636 if warnings_detected and kmeta_audit_werror:
637 bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" )
638}
639
640# Ensure that the branches (BSP and meta) are at the locations specified by
641# their SRCREV values. If they are NOT on the right commits, the branches
642# are reset to the proper commits.
643do_validate_branches() {
644 set +e
645 cd ${S}
646
647 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
648 machine_srcrev="${SRCREV_machine}"
649
650 # if SRCREV is AUTOREV, it shows up as AUTOINC; there's nothing to
651 # check and we can exit early
652 if [ "${machine_srcrev}" = "AUTOINC" ]; then
653 linux_yocto_dev='${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "1", "", d)}'
654 if [ -n "$linux_yocto_dev" ]; then
655 git checkout -q -f ${machine_branch}
656 ver=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
657 patchlevel=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
658 sublevel=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
659 kver="$ver.$patchlevel"
660 bbnote "dev kernel: performing version -> branch -> SRCREV validation"
661 bbnote "dev kernel: recipe version ${LINUX_VERSION}, src version: $kver"
662 echo "${LINUX_VERSION}" | grep -q $kver
663 if [ $? -ne 0 ]; then
664 version="$(echo ${LINUX_VERSION} | sed 's/\+.*$//g')"
665 versioned_branch="v$version/$machine_branch"
666
667 machine_branch=$versioned_branch
668 force_srcrev="$(git rev-parse $machine_branch 2> /dev/null)"
669 if [ $? -ne 0 ]; then
670 bbfatal "kernel version mismatch detected, and no valid branch $machine_branch detected"
671 fi
672
673 bbnote "dev kernel: adjusting branch to $machine_branch, srcrev to: $force_srcrev"
674 fi
675 else
676 bbnote "SRCREV validation is not required for AUTOREV"
677 fi
678 elif [ "${machine_srcrev}" = "" ]; then
679 if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
680 # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
681 # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
682 # this case, we need to reset to the given SRCREV before heading to patching
683 bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
684 force_srcrev="${SRCREV}"
685 fi
686 else
687 git cat-file -t ${machine_srcrev} > /dev/null
688 if [ $? -ne 0 ]; then
689 bberror "${machine_srcrev} is not a valid commit ID."
690 bbfatal_log "The kernel source tree may be out of sync"
691 fi
692 force_srcrev=${machine_srcrev}
693 fi
694
695 git checkout -q -f ${machine_branch}
696 if [ -n "${force_srcrev}" ]; then
697 # see if the branch we are about to patch has been properly reset to the defined
698 # SRCREV. If not, we reset it.
699 branch_head=`git rev-parse HEAD`
700 if [ "${force_srcrev}" != "${branch_head}" ]; then
701 current_branch=`git rev-parse --abbrev-ref HEAD`
702 git branch "$current_branch-orig"
703 git reset --hard ${force_srcrev}
704 # We've checked out HEAD; make sure we clean up the kgit-s2q fence-post check
705 # so the patches are applied as expected, otherwise no patching
706 # would be done in some corner cases.
707 kgit-s2q --clean
708 fi
709 fi
710
711 set -e
712}
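# A hedged standalone Python sketch of the commit validation performed above:
# verify that a revision exists and that HEAD sits on it, keeping a backup
# branch before resetting. 'repo' and 'srcrev' are hypothetical inputs.
import subprocess

def ensure_srcrev(repo, srcrev):
    # Equivalent of 'git cat-file -t <rev>': non-zero when the commit is unknown.
    if subprocess.run(["git", "-C", repo, "cat-file", "-t", srcrev],
                      capture_output=True).returncode != 0:
        raise RuntimeError("%s is not a valid commit ID" % srcrev)
    head = subprocess.check_output(
        ["git", "-C", repo, "rev-parse", "HEAD"]).decode().strip()
    if head != srcrev:
        branch = subprocess.check_output(
            ["git", "-C", repo, "rev-parse", "--abbrev-ref", "HEAD"]).decode().strip()
        # Same recovery as the shell above: preserve a backup, then hard reset.
        subprocess.check_call(["git", "-C", repo, "branch", branch + "-orig"])
        subprocess.check_call(["git", "-C", repo, "reset", "--hard", srcrev])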
713
714OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
715KBUILD_OUTPUT = "${B}"
716
717python () {
718 # If diffconfig is available, ensure it runs after kernel_configme
719 if 'do_diffconfig' in d:
720 bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
721
722 externalsrc = d.getVar('EXTERNALSRC')
723 if externalsrc:
724 # If we deltask do_patch, do_kernel_configme is left without
725 # dependencies and runs too early
726 d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack'])
727}
728
729# extra tasks
730addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile
731addtask validate_branches before do_patch after do_kernel_checkout
732addtask kernel_configcheck after do_configure before do_compile
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
deleted file mode 100644
index 3463179395..0000000000
--- a/meta/classes/kernel.bbclass
+++ /dev/null
@@ -1,821 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit linux-kernel-base kernel-module-split
8
9COMPATIBLE_HOST = ".*-linux"
10
11KERNEL_PACKAGE_NAME ??= "kernel"
12KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
13
14PROVIDES += "virtual/kernel"
15DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
16DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
17DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
18DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}"
19PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
20
21do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
22do_clean[depends] += "make-mod-scripts:do_clean"
23
24CVE_PRODUCT ?= "linux_kernel"
25
26S = "${STAGING_KERNEL_DIR}"
27B = "${WORKDIR}/build"
28KBUILD_OUTPUT = "${B}"
29OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
30
31# we include gcc above, so we don't need virtual/libc
32INHIBIT_DEFAULT_DEPS = "1"
33
34KERNEL_IMAGETYPE ?= "zImage"
35INITRAMFS_IMAGE ?= ""
36INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
37INITRAMFS_TASK ?= ""
38INITRAMFS_IMAGE_BUNDLE ?= ""
39INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}"
40INITRAMFS_MULTICONFIG ?= ""
41
42# KERNEL_VERSION is extracted from the source code. It is evaluated as
43# None on the first parse, since the code has not been fetched.
44# After the code is fetched, it will be evaluated as the real version
45# number, which would cause the kernel to be rebuilt. To avoid this, make
46# KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on
47# LINUX_VERSION, which is a constant.
48KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}"
49KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}"
50KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
51KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
52
53python __anonymous () {
54 pn = d.getVar("PN")
55 kpn = d.getVar("KERNEL_PACKAGE_NAME")
56
57 # XXX Remove this after bug 11905 is resolved
58 # FILES:${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
59 if kpn == pn:
60 bb.warn("Some packages (E.g. *-dev) might be missing due to "
61 "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
62
63 # The default kernel recipe builds in a shared location defined by
64 # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR.
65 # Set these variables to directories under ${WORKDIR} in alternate
66 # kernel recipes (i.e. where KERNEL_PACKAGE_NAME != kernel) so that they
67 # may build in parallel with the default kernel without clobbering.
68 if kpn != "kernel":
69 workdir = d.getVar("WORKDIR")
70 sourceDir = os.path.join(workdir, 'kernel-source')
71 artifactsDir = os.path.join(workdir, 'kernel-build-artifacts')
72 d.setVar("STAGING_KERNEL_DIR", sourceDir)
73 d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir)
74
75 # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
76 type = d.getVar('KERNEL_IMAGETYPE') or ""
77 alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
78 types = d.getVar('KERNEL_IMAGETYPES') or ""
79 if type not in types.split():
80 types = (type + ' ' + types).strip()
81 if alttype not in types.split():
82 types = (alttype + ' ' + types).strip()
83 d.setVar('KERNEL_IMAGETYPES', types)
84
85 # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
86 # by the kernel build system and types which are created by post-processing
87 # the output of the kernel build system (e.g. compressing vmlinux ->
88 # vmlinux.gz in kernel_do_transform_kernel()).
89 # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
90 # directly by the kernel build system.
91 if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
92 typeformake = set()
93 for type in types.split():
94 if type == 'vmlinux.gz':
95 type = 'vmlinux'
96 typeformake.add(type)
97
98 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake)))
99
100 kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
101 imagedest = d.getVar('KERNEL_IMAGEDEST')
102
103 for type in types.split():
104 if bb.data.inherits_class('nopackages', d):
105 continue
106 typelower = type.lower()
107 d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
108 d.setVar('FILES:' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
109 d.appendVar('RDEPENDS:%s-image' % kname, ' %s-image-%s (= ${EXTENDPKGV})' % (kname, typelower))
110 splitmods = d.getVar("KERNEL_SPLIT_MODULES")
111 if splitmods != '1':
112 d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname)
113 d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname)
114 d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
115 d.appendVar('RPROVIDES:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
116
117 d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
118 d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
119 d.prependVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
120if [ -n "$D" ]; then
121 ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
122else
123 ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
124 if [ $? -ne 0 ]; then
125 echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)."
126 install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s
127 fi
128fi
129set -e
130""" % (type, type, type, type, type, type, type))
131 d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e
132if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
133 rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
134fi
135set -e
136""" % (type, type, type))
137
138
139 image = d.getVar('INITRAMFS_IMAGE')
140 # If INITRAMFS_IMAGE is set but INITRAMFS_IMAGE_BUNDLE is set to 0,
141 # do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
142 # standalone for use by wic and other tools.
143 if image:
144 if d.getVar('INITRAMFS_MULTICONFIG'):
145 d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc::${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
146 else:
147 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
148 if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
149 bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
150
151 # NOTE: setting INITRAMFS_TASK is for backward compatibility
152 # The preferred method is to set INITRAMFS_IMAGE, because
153 # this INITRAMFS_TASK has circular dependency problems
154 # if the initramfs requires kernel modules
155 image_task = d.getVar('INITRAMFS_TASK')
156 if image_task:
157 d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
158}
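# For clarity, a standalone Python sketch (assumptions only, detached from the
# datastore) of the image-type handling in the anonymous python above: merge
# KERNEL_IMAGETYPE/KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES, then map the
# result to what the kernel's make can build directly.
def merge_imagetypes(imagetype, alttype, imagetypes=""):
    types = imagetypes
    for t in (imagetype, alttype):
        if t not in types.split():
            types = (t + " " + types).strip()
    return types

def types_for_make(types):
    # vmlinux.gz is produced by post-processing, so ask make for vmlinux.
    return " ".join(sorted({"vmlinux" if t == "vmlinux.gz" else t
                            for t in types.split()}))

assert merge_imagetypes("zImage", "") == "zImage"
assert types_for_make("zImage vmlinux.gz") == "vmlinux zImage"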
159
160# Here we pull in all various kernel image types which we support.
161#
162# In case you're wondering why kernel.bbclass inherits the other image
163# types instead of the other way around, the reason for that is to
164# maintain compatibility with various currently existing meta-layers.
165# By pulling in the various kernel image types here, we retain the
166# original behavior of kernel.bbclass, so no meta-layers should get
167# broken.
168#
169# KERNEL_CLASSES by default pulls in kernel-uimage.bbclass, since this
170# used to be the default behavior when only uImage was supported. This
171# variable can be appended by users who implement support for new kernel
172# image types.
173
174KERNEL_CLASSES ?= " kernel-uimage "
175inherit ${KERNEL_CLASSES}
176
177# Old style kernels may set ${S} = ${WORKDIR}/git for example
178# We need to move these over to STAGING_KERNEL_DIR. We can't just
179# create the symlink in advance as the git fetcher can't cope with
180# the symlink.
181do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
182do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
183python do_symlink_kernsrc () {
184 s = d.getVar("S")
185 if s[-1] == '/':
186 # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
187 s=s[:-1]
188 kernsrc = d.getVar("STAGING_KERNEL_DIR")
189 if s != kernsrc:
190 bb.utils.mkdirhier(kernsrc)
191 bb.utils.remove(kernsrc, recurse=True)
192 if d.getVar("EXTERNALSRC"):
193 # With EXTERNALSRC S will not be wiped so we can symlink to it
194 os.symlink(s, kernsrc)
195 else:
196 import shutil
197 shutil.move(s, kernsrc)
198 os.symlink(kernsrc, s)
199}
200# do_patch is normally ordered before do_configure, but
201# externalsrc.bbclass deletes do_patch, breaking the dependency of
202# do_configure on do_symlink_kernsrc.
203addtask symlink_kernsrc before do_patch do_configure after do_unpack
204
205inherit kernel-arch deploy
206
207PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*"
208PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*"
209PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
210
211export OS = "${TARGET_OS}"
212export CROSS_COMPILE = "${TARGET_PREFIX}"
213export KBUILD_BUILD_VERSION = "1"
214export KBUILD_BUILD_USER ?= "oe-user"
215export KBUILD_BUILD_HOST ?= "oe-host"
216
217KERNEL_RELEASE ?= "${KERNEL_VERSION}"
218
219# The directory within the kernel tree where the built kernel images are placed
220KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
221KERNEL_IMAGEDEST ?= "boot"
222
223#
224# configuration
225#
226export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"
227
228KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
229
230# kernels are generally machine specific
231PACKAGE_ARCH = "${MACHINE_ARCH}"
232
233# U-Boot support
234UBOOT_ENTRYPOINT ?= "20008000"
235UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
236
237# Some Linux kernel configurations need additional parameters on the command line
238KERNEL_EXTRA_ARGS ?= ""
239
240EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
241EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}" PAHOLE=false"
242
243KERNEL_ALT_IMAGETYPE ??= ""
244
245copy_initramfs() {
246 echo "Copying initramfs into ./usr ..."
247 # In case the directory is not created yet from the first pass compile:
248 mkdir -p ${B}/usr
249 # Find and use the first initramfs image archive type we find
250 rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
251 for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst; do
252 if [ -e "${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
253 cp ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
254 case $img in
255 *gz)
256 echo "gzip decompressing image"
257 gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
258 break
259 ;;
260 *lz4)
261 echo "lz4 decompressing image"
262 lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
263 break
264 ;;
265 *lzo)
266 echo "lzo decompressing image"
267 lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
268 break
269 ;;
270 *lzma)
271 echo "lzma decompressing image"
272 lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
273 break
274 ;;
275 *xz)
276 echo "xz decompressing image"
277 xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
278 break
279 ;;
280 *zst)
281 echo "zst decompressing image"
282 zstd -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
283 break
284 ;;
285 esac
286 break
287 fi
288 done
289 # Verify that the above loop found an initramfs, fail otherwise
290 [ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz|.zst} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
291}
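# A hedged Python sketch of the archive-type selection done by copy_initramfs
# above; the command table mirrors the shell case statement and assumes the
# matching native tools are on PATH.
import os

DECOMPRESS = {
    ".cpio": None,                 # already a plain cpio archive
    ".cpio.gz": ["gunzip", "-f"],
    ".cpio.lz4": ["lz4", "-df"],
    ".cpio.lzo": ["lzop", "-df"],
    ".cpio.lzma": ["lzma", "-df"],
    ".cpio.xz": ["xz", "-df"],
    ".cpio.zst": ["zstd", "-df"],
}

def pick_initramfs(deploy_dir, image_name):
    # First matching archive type wins, exactly as in the shell loop above.
    for ext, cmd in DECOMPRESS.items():
        candidate = os.path.join(deploy_dir, image_name + ext)
        if os.path.exists(candidate):
            return candidate, cmd
    raise FileNotFoundError("no initramfs archive found for " + image_name)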
292
293do_bundle_initramfs () {
294 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
295 echo "Creating a kernel image with a bundled initramfs..."
296 copy_initramfs
297 # Backing up the kernel image relies on its type (regular file or symbolic link)
298 tmp_path=""
299 for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
300 if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then
301 linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType`
302 realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType`
303 mv -f $realpath $realpath.bak
304 tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath
305 elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then
306 mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak
307 tmp_path=$tmp_path" "$imageType"##"
308 fi
309 done
310 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
311 kernel_do_compile
312 # Restoring kernel image
313 for tp in $tmp_path ; do
314 imageType=`echo $tp|cut -d "#" -f 1`
315 linkpath=`echo $tp|cut -d "#" -f 2`
316 realpath=`echo $tp|cut -d "#" -f 3`
317 if [ -n "$realpath" ]; then
318 mv -f $realpath $realpath.initramfs
319 mv -f $realpath.bak $realpath
320 ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs
321 else
322 mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs
323 mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType
324 fi
325 done
326 fi
327}
328do_bundle_initramfs[dirs] = "${B}"
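# A small illustrative sketch (not class code) of the '#'-delimited records
# that do_bundle_initramfs uses to remember, per image type, whether the image
# was a symlink (linkpath and realpath recorded) or a regular file (both empty).
def encode_backup(image_type, linkpath="", realpath=""):
    return "%s#%s#%s" % (image_type, linkpath, realpath)

def decode_backup(record):
    image_type, linkpath, realpath = record.split("#")
    return image_type, linkpath, realpath

assert decode_backup(encode_backup("zImage")) == ("zImage", "", "")
assert decode_backup("fitImage#link#real") == ("fitImage", "link", "real")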
329
330kernel_do_transform_bundled_initramfs() {
331 # vmlinux.gz is not built by the kernel build system
332 if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
333 gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
334 fi
335}
336do_transform_bundled_initramfs[dirs] = "${B}"
337
338python do_devshell:prepend () {
339 os.environ["LDFLAGS"] = ''
340}
341
342addtask bundle_initramfs after do_install before do_deploy
343
344KERNEL_DEBUG_TIMESTAMPS ??= "0"
345
346kernel_do_compile() {
347 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
348
349 # set up native pkg-config variables (kconfig scripts call pkg-config directly, and it cannot generically be overridden to pkg-config-native)
350 export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
351 export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
352 export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
353 export PKG_CONFIG_SYSROOT_DIR=""
354
355 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
356 # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
357 # be set....
358 if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
359 # The source directory is not necessarily a git repository, so we
360 # specify the git-dir to ensure that git does not query a
361 # repository in any parent directory.
362 SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
363 fi
364
365 ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
366 export KBUILD_BUILD_TIMESTAMP="$ts"
367 export KCONFIG_NOTIMESTAMP=1
368 bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
369 fi
370 # The $use_alternate_initrd variable is only set from
371 # do_bundle_initramfs(). It is specifically for the
372 # case where we are making a second pass at the kernel
373 # compilation and we want to force the kernel build to use a
374 # different initramfs image. The way to do that in the kernel
375 # is to specify:
376 # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
377 if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
378 # The old style way of copying a prebuilt image and building it
379 # is turned on via INITRAMFS_TASK != ""
380 copy_initramfs
381 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
382 fi
383 for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
384 oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
385 done
386}
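# A hedged sketch of the reproducible-timestamp fallback used above: prefer an
# externally provided SOURCE_DATE_EPOCH, then the last commit date of the
# source tree, then a fixed fallback standing in for REPRODUCIBLE_TIMESTAMP_ROOTFS.
import subprocess

def source_date_epoch(srcdir, env_value, fallback):
    if env_value and env_value != "0":
        return int(env_value)
    try:
        # --git-dir keeps git from walking up into an unrelated parent repo.
        out = subprocess.check_output(
            ["git", "--git-dir", srcdir + "/.git", "log", "-1", "--pretty=%ct"],
            stderr=subprocess.DEVNULL)
        return int(out)
    except (subprocess.CalledProcessError, FileNotFoundError, ValueError):
        return int(fallback)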
387
388kernel_do_transform_kernel() {
389 # vmlinux.gz is not built by the kernel build system
390 if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
391 mkdir -p "${KERNEL_OUTPUT_DIR}"
392 gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
393 fi
394}
395do_transform_kernel[dirs] = "${B}"
396addtask transform_kernel after do_compile before do_install
397
398do_compile_kernelmodules() {
399 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
400 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
401 # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
402 # be set....
403 if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
404 # The source directory is not necessarily a git repository, so we
405 # specify the git-dir to ensure that git does not query a
406 # repository in any parent directory.
407 SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
408 fi
409
410 ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
411 export KBUILD_BUILD_TIMESTAMP="$ts"
412 export KCONFIG_NOTIMESTAMP=1
413 bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
414 fi
415 if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
416 oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
417
418 # Module.symvers gets updated during the
419 # building of the kernel modules. We need to
420 # update this in the shared workdir since some
421 # external kernel modules have a dependency on
422 # other kernel modules and will look at this
423 # file to do symbol lookups
424 cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
425 # 5.10+ kernels have module.lds that we need to copy for external module builds
426 if [ -e "${B}/scripts/module.lds" ]; then
427 install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds
428 fi
429 else
430 bbnote "no modules to compile"
431 fi
432}
433addtask compile_kernelmodules after do_compile before do_strip
434
435kernel_do_install() {
436 #
437 # First install the modules
438 #
439 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
440 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
441 oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
442 rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
443 rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
444 # If the kernel/ directory is empty remove it to prevent QA issues
445 rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
446 else
447 bbnote "no modules to install"
448 fi
449
450 #
451 # Install various kernel output (zImage, map file, config, module support files)
452 #
453 install -d ${D}/${KERNEL_IMAGEDEST}
454
455 #
456 # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
457 # by do_assemble_fitimage_initramfs.
458 # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
459 # So, at the level of the install task we should not try to install the fitImage. fitImage is still not
460 # generated yet.
461 # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
462 # the deploy folder.
463 #
464
465 for imageType in ${KERNEL_IMAGETYPES} ; do
466 if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
467 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
468 fi
469 done
470
471 install -m 0644 System.map ${D}/${KERNEL_IMAGEDEST}/System.map-${KERNEL_VERSION}
472 install -m 0644 .config ${D}/${KERNEL_IMAGEDEST}/config-${KERNEL_VERSION}
473 install -m 0644 vmlinux ${D}/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION}
474 [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/${KERNEL_IMAGEDEST}/Module.symvers-${KERNEL_VERSION}
475 install -d ${D}${sysconfdir}/modules-load.d
476 install -d ${D}${sysconfdir}/modprobe.d
477}
478
479# Must be run no earlier than after do_kernel_checkout, or else the Makefile won't be at ${S}/Makefile
480do_kernel_version_sanity_check() {
481 if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then
482 exit 0
483 fi
484
485 # The Makefile determines the kernel version shown at runtime.
486 # Don't use KERNEL_VERSION, because the headers it grabs the version from aren't generated until do_compile
487 VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
488 PATCHLEVEL=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
489 SUBLEVEL=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
490 EXTRAVERSION=$(grep "^EXTRAVERSION =" ${S}/Makefile | sed s/.*=\ *//)
491
492 # Build a string for regex and a plain version string
493 reg="^${VERSION}\.${PATCHLEVEL}"
494 vers="${VERSION}.${PATCHLEVEL}"
495 if [ -n "${SUBLEVEL}" ]; then
496 # Ignoring a SUBLEVEL of zero is fine
497 if [ "${SUBLEVEL}" = "0" ]; then
498 reg="${reg}(\.${SUBLEVEL})?"
499 else
500 reg="${reg}\.${SUBLEVEL}"
501 vers="${vers}.${SUBLEVEL}"
502 fi
503 fi
504 vers="${vers}${EXTRAVERSION}"
505 reg="${reg}${EXTRAVERSION}"
506
507 if [ -z `echo ${PV} | grep -E "${reg}"` ]; then
508 bbfatal "Package Version (${PV}) does not match the version of the kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe."
509 fi
510 exit 0
511}
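# A standalone Python sketch of the PV check above: build the version regex
# from the Makefile fields, treating a SUBLEVEL of zero as optional. The
# inputs are illustrative rather than read from a real Makefile.
import re

def version_regex(version, patchlevel, sublevel, extraversion):
    reg = r"^%s\.%s" % (version, patchlevel)
    if sublevel:
        if sublevel == "0":
            reg += r"(\.%s)?" % sublevel
        else:
            reg += r"\.%s" % sublevel
    return reg + extraversion

assert re.search(version_regex("5", "15", "0", ""), "5.15")
assert re.search(version_regex("5", "15", "2", ""), "5.15.2")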
512
513addtask shared_workdir after do_compile before do_compile_kernelmodules
514addtask shared_workdir_setscene
515
516do_shared_workdir_setscene () {
517 exit 1
518}
519
520emit_depmod_pkgdata() {
521 # Stash data for depmod
522 install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/
523 echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion
524 cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION}
525}
526
527PACKAGEFUNCS += "emit_depmod_pkgdata"
528
529do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}"
530do_shared_workdir () {
531 cd ${B}
532
533 kerneldir=${STAGING_KERNEL_BUILDDIR}
534 install -d $kerneldir
535
536 #
537 # Store the kernel version in sysroots for module-base.bbclass
538 #
539
540 echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
541
542 # Copy files required for module builds
543 cp System.map $kerneldir/System.map-${KERNEL_VERSION}
544 [ -e Module.symvers ] && cp Module.symvers $kerneldir/
545 cp .config $kerneldir/
546 mkdir -p $kerneldir/include/config
547 cp include/config/kernel.release $kerneldir/include/config/kernel.release
548 if [ -e certs/signing_key.x509 ]; then
549 # The signing_key.* files are stored in the certs/ dir in
550 # newer Linux kernels
551 mkdir -p $kerneldir/certs
552 cp certs/signing_key.* $kerneldir/certs/
553 elif [ -e signing_key.priv ]; then
554 cp signing_key.* $kerneldir/
555 fi
556
557 # We can also copy over all the generated files and avoid special cases
558 # like version.h, but we've opted to keep this small until file creep starts
559 # to happen
560 if [ -e include/linux/version.h ]; then
561 mkdir -p $kerneldir/include/linux
562 cp include/linux/version.h $kerneldir/include/linux/version.h
563 fi
564
565 # As of Linux kernel version 3.0.1, the clean target removes
566 # arch/powerpc/lib/crtsavres.o which is present in
567 # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
568 if [ ${ARCH} = "powerpc" ]; then
569 if [ -e arch/powerpc/lib/crtsavres.o ]; then
570 mkdir -p $kerneldir/arch/powerpc/lib/
571 cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
572 fi
573 fi
574
575 if [ -d include/generated ]; then
576 mkdir -p $kerneldir/include/generated/
577 cp -fR include/generated/* $kerneldir/include/generated/
578 fi
579
580 if [ -d arch/${ARCH}/include/generated ]; then
581 mkdir -p $kerneldir/arch/${ARCH}/include/generated/
582 cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
583 fi
584
585 if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
586 # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
587 # out-of-tree modules to be able to generate object files.
588 if [ -x tools/objtool/objtool ]; then
589 mkdir -p ${kerneldir}/tools/objtool
590 cp tools/objtool/objtool ${kerneldir}/tools/objtool/
591 fi
592 fi
593}
594
595# We don't need to stage anything, in particular not the modules/firmware, since those would clash with linux-firmware
596sysroot_stage_all () {
597 :
598}
599
600KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
601
602python check_oldest_kernel() {
603 oldest_kernel = d.getVar('OLDEST_KERNEL')
604 kernel_version = d.getVar('KERNEL_VERSION')
605 tclibc = d.getVar('TCLIBC')
606 if tclibc == 'glibc':
607 kernel_version = kernel_version.split('-', 1)[0]
608 if oldest_kernel and kernel_version:
609 if bb.utils.vercmp_string(kernel_version, oldest_kernel) < 0:
610 bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest_kernel, kernel_version, tclibc))
611}
612
613check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
614do_configure[prefuncs] += "check_oldest_kernel"
615
616kernel_do_configure() {
617 # fixes extra + in /lib/modules/2.6.37+
618 # $ scripts/setlocalversion . => +
619 # $ make kernelversion => 2.6.37
620 # $ make kernelrelease => 2.6.37+
621 touch ${B}/.scmversion ${S}/.scmversion
622
623 if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
624 mv "${S}/.config" "${B}/.config"
625 fi
626
627 # Copy defconfig to .config if .config does not exist. This allows
628 # recipes to manage the .config themselves in do_configure:prepend().
629 if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
630 cp "${WORKDIR}/defconfig" "${B}/.config"
631 fi
632
633 ${KERNEL_CONFIG_COMMAND}
634}
635
636do_savedefconfig() {
637 bbplain "Saving defconfig to:\n${B}/defconfig"
638 oe_runmake -C ${B} LD='${KERNEL_LD}' savedefconfig
639}
640do_savedefconfig[nostamp] = "1"
641addtask savedefconfig after do_configure
642
643inherit cml1
644
645KCONFIG_CONFIG_COMMAND:append = " PAHOLE=false LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
646
647EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
648
649# kernel-base becomes kernel-${KERNEL_VERSION}
650# kernel-image becomes kernel-image-${KERNEL_VERSION}
651PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules ${KERNEL_PACKAGE_NAME}-dbg"
652FILES:${PN} = ""
653FILES:${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
654FILES:${KERNEL_PACKAGE_NAME}-image = ""
655FILES:${KERNEL_PACKAGE_NAME}-dev = "/${KERNEL_IMAGEDEST}/System.map* /${KERNEL_IMAGEDEST}/Module.symvers* /${KERNEL_IMAGEDEST}/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
656FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION_NAME}"
657FILES:${KERNEL_PACKAGE_NAME}-modules = ""
658FILES:${KERNEL_PACKAGE_NAME}-dbg = "/usr/lib/debug /usr/src/debug"
659RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
660# Allow machines to override this dependency if kernel image files are
661# not wanted in images as standard
662RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})"
663PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
664RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}"
665PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
666RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
667ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1"
668ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1"
669ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1"
670ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-modules = "1"
671DESCRIPTION:${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
672
673pkg_postinst:${KERNEL_PACKAGE_NAME}-base () {
674 if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
675 mkdir -p $D/lib/modules/${KERNEL_VERSION}
676 fi
677 if [ -n "$D" ]; then
678 depmodwrapper -a -b $D ${KERNEL_VERSION}
679 else
680 depmod -a ${KERNEL_VERSION}
681 fi
682}
683
684PACKAGESPLITFUNCS:prepend = "split_kernel_packages "
685
686python split_kernel_packages () {
687 do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
688}
689
690# Many scripts want to look in arch/$arch/boot for the bootable
691# image. This poses a problem for vmlinux and vmlinuz based
692# booting. This task arranges to have vmlinux and vmlinuz appear
693# in the normalized directory location.
694do_kernel_link_images() {
695 if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
696 mkdir ${B}/arch/${ARCH}/boot
697 fi
698 cd ${B}/arch/${ARCH}/boot
699 ln -sf ../../../vmlinux
700 if [ -f ../../../vmlinuz ]; then
701 ln -sf ../../../vmlinuz
702 fi
703 if [ -f ../../../vmlinuz.bin ]; then
704 ln -sf ../../../vmlinuz.bin
705 fi
706 if [ -f ../../../vmlinux.64 ]; then
707 ln -sf ../../../vmlinux.64
708 fi
709}
710addtask kernel_link_images after do_compile before do_strip
711
712python do_strip() {
713 import shutil
714
715 strip = d.getVar('STRIP')
716 extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS')
717 kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux"
718
719 if (extra_sections and kernel_image.find(d.getVar('KERNEL_IMAGEDEST') + '/vmlinux') != -1):
720 kernel_image_stripped = kernel_image + ".stripped"
721 shutil.copy2(kernel_image, kernel_image_stripped)
722 oe.package.runstrip((kernel_image_stripped, 8, strip, extra_sections))
723 bb.debug(1, "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections: " + \
724 extra_sections)
725}
726do_strip[dirs] = "${B}"
727
728addtask strip before do_sizecheck after do_kernel_link_images
729
730# Support checking the kernel size since some kernels need to reside in partitions
731# with a fixed length, or there is a limit on transferring the kernel to memory.
732# If more than one image type is enabled, warn on any that don't fit but only fail
733# if none fit.
734do_sizecheck() {
735 if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
736 invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
737 if [ -n "$invalid" ]; then
738 die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
739 fi
740 at_least_one_fits=
741 for imageType in ${KERNEL_IMAGETYPES} ; do
742 size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
743 if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then
744 bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
745 else
746 at_least_one_fits=y
747 fi
748 done
749 if [ -z "$at_least_one_fits" ]; then
750 die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular."
751 fi
752 fi
753}
754do_sizecheck[dirs] = "${B}"
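# A hedged Python sketch of the policy do_sizecheck implements: warn for each
# image type over the limit, fail only when none fit. Sizes are in kilobytes,
# matching the unit of KERNEL_IMAGE_MAXSIZE.
def check_sizes(image_sizes_kb, max_kb):
    # image_sizes_kb: mapping of image type -> size in KiB (du -ks style).
    fits = False
    for image_type, size in image_sizes_kb.items():
        if size > max_kb:
            print("warning: %s (%dK > %dK) is too big" % (image_type, size, max_kb))
        else:
            fits = True
    if not fits:
        raise RuntimeError("all kernel images are too big for the device")

check_sizes({"zImage": 4096, "fitImage": 9000}, 8192)  # warns on fitImage only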
755
756addtask sizecheck before do_install after do_strip
757
758inherit kernel-artifact-names
759
760kernel_do_deploy() {
761 deployDir="${DEPLOYDIR}"
762 if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
763 deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
764 mkdir "$deployDir"
765 fi
766
767 for imageType in ${KERNEL_IMAGETYPES} ; do
768 baseName=$imageType-${KERNEL_IMAGE_NAME}
769
770 if [ -s ${KERNEL_OUTPUT_DIR}/$imageType.stripped ] ; then
771 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.stripped $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
772 else
773 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
774 fi
775 if [ -n "${KERNEL_IMAGE_LINK_NAME}" ] ; then
776 ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
777 fi
778 if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
779 ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType
780 fi
781 done
782
783 if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
784 mkdir -p ${D}${root_prefix}/lib
785 if [ -n "${SOURCE_DATE_EPOCH}" ]; then
786 TAR_ARGS="--sort=name --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}"
787 else
788 TAR_ARGS=""
789 fi
790 TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
791 tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz
792
793 if [ -n "${MODULE_TARBALL_LINK_NAME}" ] ; then
794 ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
795 fi
796 fi
797
798 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
799 for imageType in ${KERNEL_IMAGETYPES} ; do
800 if [ "$imageType" = "fitImage" ] ; then
801 continue
802 fi
803 initramfsBaseName=$imageType-${INITRAMFS_NAME}
804 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT}
805 if [ -n "${INITRAMFS_LINK_NAME}" ] ; then
806 ln -sf $initramfsBaseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${INITRAMFS_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
807 fi
808 done
809 fi
810}
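# A minimal sketch of the reproducible module-tarball arguments assembled
# above: stable file order and clamped mtimes when SOURCE_DATE_EPOCH is known,
# plus fixed ownership in every case.
def module_tar_args(source_date_epoch=None):
    args = []
    if source_date_epoch:
        args += ["--sort=name", "--clamp-mtime", "--mtime=@%s" % source_date_epoch]
    return args + ["--owner=0", "--group=0"]

assert module_tar_args() == ["--owner=0", "--group=0"]
assert module_tar_args("1700000000")[0] == "--sort=name"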
811
812# We deploy to filenames that include PKGV and PKGR, so read the saved data to
813# ensure we get the right values for both
814do_deploy[prefuncs] += "read_subpackage_metadata"
815
816addtask deploy after do_populate_sysroot do_packagedata
817
818EXPORT_FUNCTIONS do_deploy
819
820# Add Device Tree support
821inherit kernel-devicetree
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
deleted file mode 100644
index a32882a5d2..0000000000
--- a/meta/classes/kernelsrc.bbclass
+++ /dev/null
@@ -1,16 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7S = "${STAGING_KERNEL_DIR}"
8deltask do_fetch
9deltask do_unpack
10do_patch[depends] += "virtual/kernel:do_shared_workdir"
11do_patch[noexec] = "1"
12do_package[depends] += "virtual/kernel:do_populate_sysroot"
13KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
14
15inherit linux-kernel-base
16
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
deleted file mode 100644
index 6d110155e5..0000000000
--- a/meta/classes/lib_package.bbclass
+++ /dev/null
@@ -1,12 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6#
7# ${PN}-bin is defined in bitbake.conf
8#
9# We need to allow the other packages to be greedy with what they
10# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
11#
12PACKAGE_BEFORE_PN = "${PN}-bin"
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
deleted file mode 100644
index de3d4223a8..0000000000
--- a/meta/classes/libc-package.bbclass
+++ /dev/null
@@ -1,390 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
9# may need packaging and it's pointless to duplicate this code.
10#
11# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
12# "compile" - Use QEMU to generate the binary locale files
13# "precompiled" - The binary locale files are pregenerated and already present
14# "ondevice" - The device will build the locale files upon first boot through the postinst
15
16GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
17
18GLIBC_SPLIT_LC_PACKAGES ?= "0"
19
20python __anonymous () {
21 enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION")
22
23 pn = d.getVar("PN")
24 if pn.endswith("-initial"):
25 enabled = False
26
27 if enabled and int(enabled):
28 import re
29
30 target_arch = d.getVar("TARGET_ARCH")
31 binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or ""
32 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or ""
33
34 for regexp in binary_arches.split(" "):
35 r = re.compile(regexp)
36
37 if r.match(target_arch):
38 depends = d.getVar("DEPENDS")
39 if use_cross_localedef == "1" :
40 depends = "%s cross-localedef-native" % depends
41 else:
42 depends = "%s qemu-native" % depends
43 d.setVar("DEPENDS", depends)
44 d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
45 break
46}
47
48# try to fix compile failures when charsets/locales/locale-code are disabled
49PACKAGE_NO_GCONV ?= "0"
50
51OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
52
53locale_base_postinst_ontarget() {
54localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
55}
56
57locale_base_postrm() {
58#!/bin/sh
59localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s %s
60}
61
62LOCALETREESRC ?= "${PKGD}"
63
64do_prep_locale_tree() {
65 treedir=${WORKDIR}/locale-tree
66 rm -rf $treedir
67 mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
68 tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
69 # decompress the charmaps to avoid parsing errors
70 for i in $treedir/${datadir}/i18n/charmaps/*gz; do
71 gunzip $i
72 done
73 # The extract pattern "./l*.so*" is carefully selected so that it will
74 # match ld*.so and lib*.so*, but not any files in the gconv directory
75 # (if it exists). This makes sure we only unpack the files we need.
76 # This is important in case usrmerge is set in DISTRO_FEATURES, which
77 # means ${base_libdir} == ${libdir}.
78 tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
79 if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
80 tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
81 fi
82 install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
83}
84
85do_collect_bins_from_locale_tree() {
86 treedir=${WORKDIR}/locale-tree
87
88 parent=$(dirname ${localedir})
89 mkdir -p ${PKGD}/$parent
90 tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
91
92 # Finalize the tree by changing all duplicate files into hard links
93 cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
94}
95
96inherit qemu
97
98python package_do_split_gconvs () {
99 import re
100 if (d.getVar('PACKAGE_NO_GCONV') == '1'):
101 bb.note("package requested not splitting gconvs")
102 return
103
104 if not d.getVar('PACKAGES'):
105 return
106
107 mlprefix = d.getVar("MLPREFIX") or ""
108
109 bpn = d.getVar('BPN')
110 libdir = d.getVar('libdir')
111 if not libdir:
112 bb.error("libdir not defined")
113 return
114 datadir = d.getVar('datadir')
115 if not datadir:
116 bb.error("datadir not defined")
117 return
118
119 gconv_libdir = oe.path.join(libdir, "gconv")
120 charmap_dir = oe.path.join(datadir, "i18n", "charmaps")
121 locales_dir = oe.path.join(datadir, "i18n", "locales")
122 binary_locales_dir = d.getVar('localedir')
123
124 def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
125 deps = []
126 f = open(fn, "rb")
127 c_re = re.compile(r'^copy "(.*)"')
128 i_re = re.compile(r'^include "(\w+)".*')
129 for l in f.readlines():
130 l = l.decode("latin-1")
131 m = c_re.match(l) or i_re.match(l)
132 if m:
133 dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
134 if not dp in deps:
135 deps.append(dp)
136 f.close()
137 if deps != []:
138 d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
139 if bpn != 'glibc':
140 d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
141
142 do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
143 description='gconv module for character set %s', hook=calc_gconv_deps, \
144 extra_depends=bpn+'-gconv')
145
146 def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
147 deps = []
148 f = open(fn, "rb")
149 c_re = re.compile(r'^copy "(.*)"')
150 i_re = re.compile(r'^include "(\w+)".*')
151 for l in f.readlines():
152 l = l.decode("latin-1")
153 m = c_re.match(l) or i_re.match(l)
154 if m:
155 dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
156 if not dp in deps:
157 deps.append(dp)
158 f.close()
159 if deps != []:
160 d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
161 if bpn != 'glibc':
162 d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
163
164 do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
165 description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
166
167 def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
168 deps = []
169 f = open(fn, "rb")
170 c_re = re.compile(r'^copy "(.*)"')
171 i_re = re.compile(r'^include "(\w+)".*')
172 for l in f.readlines():
173 l = l.decode("latin-1")
174 m = c_re.match(l) or i_re.match(l)
175 if m:
176 dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
177 if not dp in deps:
178 deps.append(dp)
179 f.close()
180 if deps != []:
181 d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
182 if bpn != 'glibc':
183 d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
184
185 do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
186 description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
187 d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
188
189 use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")
190
191 dot_re = re.compile(r"(.*)\.(.*)")
192
193 # Read in supported locales and associated encodings
194 supported = {}
195 with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
196 for line in f.readlines():
197 try:
198 locale, charset = line.rstrip().split()
199 except ValueError:
200 continue
201 supported[locale] = charset
202
203 # The GLIBC_GENERATE_LOCALES variable specifies which locales should be generated; empty or "all" means all locales
204 to_generate = d.getVar('GLIBC_GENERATE_LOCALES')
205 if not to_generate or to_generate == 'all':
206 to_generate = sorted(supported.keys())
207 else:
208 to_generate = to_generate.split()
209 for locale in to_generate:
210 if locale not in supported:
211 if '.' in locale:
212 charset = locale.split('.')[1]
213 else:
214 charset = 'UTF-8'
215 bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
216 supported[locale] = charset
217
218 def output_locale_source(name, pkgname, locale, encoding):
219 d.setVar('RDEPENDS:%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
220 (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
221 d.setVar('pkg_postinst_ontarget:%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
222 % (locale, encoding, locale))
223 d.setVar('pkg_postrm:%s' % pkgname, d.getVar('locale_base_postrm') % \
224 (locale, encoding, locale))
225
226 def output_locale_binary_rdepends(name, pkgname, locale, encoding):
227 dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name))
228 lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
229 if lcsplit and int(lcsplit):
230 d.appendVar('PACKAGES', ' ' + dep)
231 d.setVar('ALLOW_EMPTY:%s' % dep, '1')
232 d.setVar('RDEPENDS:%s' % pkgname, mlprefix + dep)
233
234 commands = {}
235
236 def output_locale_binary(name, pkgname, locale, encoding):
237 treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree")
238 ldlibdir = oe.path.join(treedir, d.getVar("base_libdir"))
239 path = d.getVar("PATH")
240 i18npath = oe.path.join(treedir, datadir, "i18n")
241 gconvpath = oe.path.join(treedir, "iconvdata")
242 outputpath = oe.path.join(treedir, binary_locales_dir)
243
244 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
245 if use_cross_localedef == "1":
246 target_arch = d.getVar('TARGET_ARCH')
247 locale_arch_options = { \
248 "arc": " --uint32-align=4 --little-endian ", \
249 "arceb": " --uint32-align=4 --big-endian ", \
250 "arm": " --uint32-align=4 --little-endian ", \
251 "armeb": " --uint32-align=4 --big-endian ", \
252 "aarch64": " --uint32-align=4 --little-endian ", \
253 "aarch64_be": " --uint32-align=4 --big-endian ", \
254 "sh4": " --uint32-align=4 --big-endian ", \
255 "powerpc": " --uint32-align=4 --big-endian ", \
256 "powerpc64": " --uint32-align=4 --big-endian ", \
257 "powerpc64le": " --uint32-align=4 --little-endian ", \
258 "mips": " --uint32-align=4 --big-endian ", \
259 "mipsisa32r6": " --uint32-align=4 --big-endian ", \
260 "mips64": " --uint32-align=4 --big-endian ", \
261 "mipsisa64r6": " --uint32-align=4 --big-endian ", \
262 "mipsel": " --uint32-align=4 --little-endian ", \
263 "mipsisa32r6el": " --uint32-align=4 --little-endian ", \
264 "mips64el":" --uint32-align=4 --little-endian ", \
265 "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
266 "riscv64": " --uint32-align=4 --little-endian ", \
267 "riscv32": " --uint32-align=4 --little-endian ", \
268 "i586": " --uint32-align=4 --little-endian ", \
269 "i686": " --uint32-align=4 --little-endian ", \
270 "x86_64": " --uint32-align=4 --little-endian " }
271
272 if target_arch in locale_arch_options:
273 localedef_opts = locale_arch_options[target_arch]
274 else:
275 bb.error("locale_arch_options not found for target_arch=" + target_arch)
276 bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
277
278 localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
279 --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
280 % (treedir, treedir, datadir, locale, encoding, outputpath, name)
281
282 cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
283 (path, i18npath, gconvpath, localedef_opts)
284 else: # earlier slower qemu way
285 qemu = qemu_target_binary(d)
286 localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
287 --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
288 % (treedir, datadir, locale, encoding, name)
289
290 qemu_options = d.getVar('QEMU_OPTIONS')
291
292 cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
293 -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
294 (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
295
296 commands["%s/%s" % (outputpath, name)] = cmd
297
298 bb.note("generating locale %s (%s)" % (locale, encoding))
299
300 def output_locale(name, locale, encoding):
301 pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
302 d.setVar('ALLOW_EMPTY:%s' % pkgname, '1')
303 d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
304 rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
305 m = re.match(r"(.*)_(.*)", name)
306 if m:
307 rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
308 d.setVar('RPROVIDES:%s' % pkgname, rprovides)
309
310 if use_bin == "compile":
311 output_locale_binary_rdepends(name, pkgname, locale, encoding)
312 output_locale_binary(name, pkgname, locale, encoding)
313 elif use_bin == "precompiled":
314 output_locale_binary_rdepends(name, pkgname, locale, encoding)
315 else:
316 output_locale_source(name, pkgname, locale, encoding)
317
318 if use_bin == "compile":
319 bb.note("preparing tree for binary locale generation")
320 bb.build.exec_func("do_prep_locale_tree", d)
321
322 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0)
323 utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0)
324
325 encodings = {}
326 for locale in to_generate:
327 charset = supported[locale]
328 if utf8_only and charset != 'UTF-8':
329 continue
330
331 m = dot_re.match(locale)
332 if m:
333 base = m.group(1)
334 else:
335 base = locale
336
337 # Non-precompiled locales may be renamed so that the default
338 # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and
339 # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly
340 # contradicts SUPPORTED.
341 if use_bin == "precompiled" or not utf8_is_default:
342 output_locale(locale, base, charset)
343 else:
344 if charset == 'UTF-8':
345 output_locale(base, base, charset)
346 else:
347 output_locale('%s.%s' % (base, charset), base, charset)
348
349 def metapkg_hook(file, pkg, pattern, format, basename):
350 name = basename.split('/', 1)[0]
351 metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
352 d.appendVar('RDEPENDS:%s' % metapkg, ' ' + pkg)
353
354 if use_bin == "compile":
355 makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
356 with open(makefile, "w") as m:
357 m.write("all: %s\n\n" % " ".join(commands.keys()))
358 total = len(commands)
359 for i, (maketarget, makerecipe) in enumerate(commands.items()):
360 m.write(maketarget + ":\n")
361 m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
362 m.write("\t" + makerecipe + "\n\n")
363 d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
364 d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
365 bb.note("Executing binary locale generation makefile")
366 bb.build.exec_func("oe_runmake", d)
367 bb.note("collecting binary locales from locale tree")
368 bb.build.exec_func("do_collect_bins_from_locale_tree", d)
369
370 if use_bin in ('compile', 'precompiled'):
371 lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
372 if lcsplit and int(lcsplit):
373 do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \
374 output_pattern=bpn+'-binary-localedata-%s', \
375 description='binary locale definition for %s', recursive=True,
376 hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
377 else:
378 do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \
379 output_pattern=bpn+'-binary-localedata-%s', \
380 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
381 else:
382 bb.note("generation of binary locales is disabled, this may break i18n!")
383
384}
385
386# We want to do this indirection so that we can safely 'return'
387# from the called function even though we're prepending
388python populate_packages:prepend () {
389 bb.build.exec_func('package_do_split_gconvs', d)
390}
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
deleted file mode 100644
index 560acb8b6f..0000000000
--- a/meta/classes/license.bbclass
+++ /dev/null
@@ -1,426 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
8# LIC_FILES_CHKSUM.
9# TODO:
10# - There is a real issue revolving around license naming standards.
11
12LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
13LICSSTATEDIR = "${WORKDIR}/license-destdir/"
14
15# Create extra package with license texts and add it to RRECOMMENDS:${PN}
16LICENSE_CREATE_PACKAGE[type] = "boolean"
17LICENSE_CREATE_PACKAGE ??= "0"
18LICENSE_PACKAGE_SUFFIX ??= "-lic"
19LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
20
21addtask populate_lic after do_patch before do_build
22do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
23do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
24
25python do_populate_lic() {
26 """
27 Populate LICENSE_DIRECTORY with licenses.
28 """
29 lic_files_paths = find_license_files(d)
30
31 # The base directory we wrangle licenses to
32 destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('PN'))
33 copy_license_files(lic_files_paths, destdir)
34 info = get_recipe_info(d)
35 with open(os.path.join(destdir, "recipeinfo"), "w") as f:
36 for key in sorted(info.keys()):
37 f.write("%s: %s\n" % (key, info[key]))
38 oe.qa.exit_if_errors(d)
39}
40
41PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
42# it would be better to copy them in do_install:append, but find_license_files is python
43python perform_packagecopy:prepend () {
44 enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
45 if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
46 lic_files_paths = find_license_files(d)
47
48 # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
49 destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
50 copy_license_files(lic_files_paths, destdir)
51 add_package_and_files(d)
52}
53perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
54
55def get_recipe_info(d):
56 info = {}
57 info["PV"] = d.getVar("PV")
58 info["PR"] = d.getVar("PR")
59 info["LICENSE"] = d.getVar("LICENSE")
60 return info
61
62def add_package_and_files(d):
63 packages = d.getVar('PACKAGES')
64 files = d.getVar('LICENSE_FILES_DIRECTORY')
65 pn = d.getVar('PN')
66 pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
67 if pn_lic in packages.split():
68 bb.warn("%s package already exists in %s." % (pn_lic, pn))
69 else:
70 # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
71 d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
72 d.setVar('FILES:' + pn_lic, files)
73
74def copy_license_files(lic_files_paths, destdir):
75 import shutil
76 import errno
77
78 bb.utils.mkdirhier(destdir)
79 for (basename, path, beginline, endline) in lic_files_paths:
80 try:
81 src = path
82 dst = os.path.join(destdir, basename)
83 if os.path.exists(dst):
84 os.remove(dst)
85 if os.path.islink(src):
86 src = os.path.realpath(src)
87 canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
88 if canlink:
89 try:
90 os.link(src, dst)
91 except OSError as err:
92 if err.errno == errno.EXDEV:
93 # Copy license files if hardlink is not possible even if st_dev is the
94 # same on source and destination (docker container with device-mapper?)
95 canlink = False
96 else:
97 raise
98 # Only chown if we did hardlink and we're running under pseudo
99 if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
100 os.chown(dst,0,0)
101 if not canlink:
102 begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
103 end_idx = max(0, int(endline)) if endline is not None else None
104 if begin_idx is None and end_idx is None:
105 shutil.copyfile(src, dst)
106 else:
107 with open(src, 'rb') as src_f:
108 with open(dst, 'wb') as dst_f:
109 dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
110
111 except Exception as e:
112 bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
113
114def find_license_files(d):
115 """
116 Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
117 """
118 import shutil
119 import oe.license
120 from collections import defaultdict, OrderedDict
121
122 # All the license files for the package
123 lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
124 pn = d.getVar('PN')
125 # The license files are located in S, at the paths given in LIC_FILES_CHKSUM.
126 srcdir = d.getVar('S')
127 # Directory we store the generic licenses as set in the distro configuration
128 generic_directory = d.getVar('COMMON_LICENSE_DIR')
129 # List of basename, path tuples
130 lic_files_paths = []
131 # dict to keep track of generic license mappings
132 non_generic_lics = {}
133 # Entries from LIC_FILES_CHKSUM
134 lic_chksums = {}
135 license_source_dirs = []
136 license_source_dirs.append(generic_directory)
137 try:
138 additional_lic_dirs = d.getVar('LICENSE_PATH').split()
139 for lic_dir in additional_lic_dirs:
140 license_source_dirs.append(lic_dir)
141 except:
142 pass
143
144 class FindVisitor(oe.license.LicenseVisitor):
145 def visit_Str(self, node):
146 #
147 # Until we decide how to handle the two supported
148 # modifiers ("or later" = '+'
149 # and "with exceptions" = '*'),
150 # we'll just strip out the modifier and use
151 # the base license.
152 find_license(node.s.replace("+", "").replace("*", ""))
153 self.generic_visit(node)
154
155 def visit_Constant(self, node):
156 find_license(node.value.replace("+", "").replace("*", ""))
157 self.generic_visit(node)
158
159 def find_license(license_type):
160 try:
161 bb.utils.mkdirhier(gen_lic_dest)
162 except:
163 pass
164 spdx_generic = None
165 license_source = None
166 # If the generic license does not exist, we need to check whether there is an SPDX mapping to it,
167 # unless NO_GENERIC_LICENSE is set.
168 for lic_dir in license_source_dirs:
169 if not os.path.isfile(os.path.join(lic_dir, license_type)):
170 if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
171 # Great, there is an SPDXLICENSEMAP. We can copy!
172 bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
173 spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
174 license_source = lic_dir
175 break
176 elif os.path.isfile(os.path.join(lic_dir, license_type)):
177 spdx_generic = license_type
178 license_source = lic_dir
179 break
180
181 non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
182 if spdx_generic and license_source:
183 # we really should copy to generic_ + spdx_generic, however, that ends up messing up the
184 # manifest audit. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes)
185
186 lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
187 None, None))
188
189 # The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
190 # and should not be allowed; warn the user in this case.
191 if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
192 oe.qa.handle_error("license-no-generic",
193 "%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)
194
195 elif non_generic_lic and non_generic_lic in lic_chksums:
196 # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
197 # of the package rather than the license_source_dirs.
198 lic_files_paths.append(("generic_" + license_type,
199 os.path.join(srcdir, non_generic_lic), None, None))
200 non_generic_lics[non_generic_lic] = license_type
201 else:
202 # Explicitly avoid the CLOSED license because this isn't generic
203 if license_type != 'CLOSED':
204 # And here is where we warn people that their licenses are lousy
205 oe.qa.handle_error("license-exists",
206 "%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
207 pass
208
209 if not generic_directory:
210 bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
211
212 for url in lic_files.split():
213 try:
214 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
215 if method != "file" or not path:
216 raise bb.fetch.MalformedUrl()
217 except bb.fetch.MalformedUrl:
218 bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
219 # We want the license filename and path
220 chksum = parm.get('md5', None)
221 beginline = parm.get('beginline')
222 endline = parm.get('endline')
223 lic_chksums[path] = (chksum, beginline, endline)
224
225 v = FindVisitor()
226 try:
227 v.visit_string(d.getVar('LICENSE'))
228 except oe.license.InvalidLicense as exc:
229 bb.fatal('%s: %s' % (d.getVar('PF'), exc))
230 except SyntaxError:
231 oe.qa.handle_error("license-syntax",
232 "%s: Failed to parse its LICENSE field." % (d.getVar('PF')), d)
233 # Add files from LIC_FILES_CHKSUM to list of license files
234 lic_chksum_paths = defaultdict(OrderedDict)
235 for path, data in sorted(lic_chksums.items()):
236 lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
237 for basename, files in lic_chksum_paths.items():
238 if len(files) == 1:
239 # Don't copy a LICENSE again if it was already handled as non-generic
240 if basename in non_generic_lics:
241 continue
242 data = list(files.values())[0]
243 lic_files_paths.append(tuple([basename] + list(data)))
244 else:
245 # If there are multiple different license files with identical
246 # basenames we rename them to <file>.0, <file>.1, ...
247 for i, data in enumerate(files.values()):
248 lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
249
250 return lic_files_paths
251
252def return_spdx(d, license):
253 """
254 This function returns the spdx mapping of a license if it exists.
255 """
256 return d.getVarFlag('SPDXLICENSEMAP', license)
257
258def canonical_license(d, license):
259 """
260 Return the canonical (SPDX) form of the license if available (so GPLv3
261 becomes GPL-3.0-only) or the passed license if there is no canonical form.
262 """
263 return d.getVarFlag('SPDXLICENSEMAP', license) or license
264
265def expand_wildcard_licenses(d, wildcard_licenses):
266 """
267 There are some common wildcard values users may want to use. Support them
268 here.
269 """
270 licenses = set(wildcard_licenses)
271 mapping = {
272 "AGPL-3.0*" : ["AGPL-3.0-only", "AGPL-3.0-or-later"],
273 "GPL-3.0*" : ["GPL-3.0-only", "GPL-3.0-or-later"],
274 "LGPL-3.0*" : ["LGPL-3.0-only", "LGPL-3.0-or-later"],
275 }
276 for k in mapping:
277 if k in wildcard_licenses:
278 licenses.remove(k)
279 for item in mapping[k]:
280 licenses.add(item)
281
282 for l in licenses:
283 if l in oe.license.obsolete_license_list():
284 bb.fatal("Error, %s is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE" % l)
285 if "*" in l:
286 bb.fatal("Error, %s is an invalid license wildcard entry" % l)
287
288 return list(licenses)
289
290def incompatible_license_contains(license, truevalue, falsevalue, d):
291 license = canonical_license(d, license)
292 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
293 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
294 return truevalue if license in bad_licenses else falsevalue
295
296def incompatible_pkg_license(d, dont_want_licenses, license):
297 # Handles an "or" between two license sets provided by
298 # flattened_licenses(); pick one that works if possible.
299 def choose_lic_set(a, b):
300 return a if all(oe.license.license_ok(canonical_license(d, lic),
301 dont_want_licenses) for lic in a) else b
302
303 try:
304 licenses = oe.license.flattened_licenses(license, choose_lic_set)
305 except oe.license.LicenseError as exc:
306 bb.fatal('%s: %s' % (d.getVar('P'), exc))
307
308 incompatible_lic = []
309 for l in licenses:
310 license = canonical_license(d, l)
311 if not oe.license.license_ok(license, dont_want_licenses):
312 incompatible_lic.append(license)
313
314 return sorted(incompatible_lic)
315
316def incompatible_license(d, dont_want_licenses, package=None):
317 """
318 This function checks if a recipe has only incompatible licenses. It also
319 takes the 'or' operator into consideration. dont_want_licenses should be
320 passed as canonical (SPDX) names.
321 """
322 import oe.license
323 license = d.getVar("LICENSE:%s" % package) if package else None
324 if not license:
325 license = d.getVar('LICENSE')
326
327 return incompatible_pkg_license(d, dont_want_licenses, license)
328
329def check_license_flags(d):
330 """
331 This function checks if a recipe has any LICENSE_FLAGS that
332 aren't acceptable.
333
334 If it does, it returns all the LICENSE_FLAGS missing from the list
335 of acceptable license flags, or all of the LICENSE_FLAGS if there
336 is no list of acceptable flags.
337
338 If everything is acceptable, it returns None.
339 """
340
341 def license_flag_matches(flag, acceptlist, pn):
342 """
343 Return True if flag matches something in acceptlist, False if not.
344
345 Before we test a flag against the acceptlist, we append _${PN}
346 to it. We then try to match that string against the
347 acceptlist. This covers the normal case, where we expect
348 LICENSE_FLAGS to be a simple string like 'commercial', which
349 the user typically matches exactly in the acceptlist by
350 explicitly appending the package name, e.g. 'commercial_foo'.
351 If we fail the match however, we then split the flag across
352 '_' and append each fragment and test until we either match or
353 run out of fragments.
354 """
355 flag_pn = ("%s_%s" % (flag, pn))
356 for candidate in acceptlist:
357 if flag_pn == candidate:
358 return True
359
360 flag_cur = ""
361 flagments = flag_pn.split("_")
362 flagments.pop() # we've already tested the full string
363 for flagment in flagments:
364 if flag_cur:
365 flag_cur += "_"
366 flag_cur += flagment
367 for candidate in acceptlist:
368 if flag_cur == candidate:
369 return True
370 return False
371
372 def all_license_flags_match(license_flags, acceptlist):
373 """ Return all unmatched flags, None if all flags match """
374 pn = d.getVar('PN')
375 split_acceptlist = acceptlist.split()
376 flags = []
377 for flag in license_flags.split():
378 if not license_flag_matches(flag, split_acceptlist, pn):
379 flags.append(flag)
380 return flags if flags else None
381
382 license_flags = d.getVar('LICENSE_FLAGS')
383 if license_flags:
384 acceptlist = d.getVar('LICENSE_FLAGS_ACCEPTED')
385 if not acceptlist:
386 return license_flags.split()
387 unmatched_flags = all_license_flags_match(license_flags, acceptlist)
388 if unmatched_flags:
389 return unmatched_flags
390 return None
391
392def check_license_format(d):
393 """
394 This function checks that LICENSE is well formed and
395 validates the operators between licenses:
396 adjacent license names without an operator are invalid.
397 """
398 pn = d.getVar('PN')
399 licenses = d.getVar('LICENSE')
400 from oe.license import license_operator, license_operator_chars, license_pattern
401
402 elements = list(filter(lambda x: x.strip(), license_operator.split(licenses)))
403 for pos, element in enumerate(elements):
404 if license_pattern.match(element):
405 if pos > 0 and license_pattern.match(elements[pos - 1]):
406 oe.qa.handle_error('license-format',
407 '%s: LICENSE value "%s" has an invalid format - license names ' \
408 'must be separated by the following characters to indicate ' \
409 'the license selection: %s' %
410 (pn, licenses, license_operator_chars), d)
411 elif not license_operator.match(element):
412 oe.qa.handle_error('license-format',
413 '%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
414 'in the valid list of separators (%s)' %
415 (pn, licenses, element, license_operator_chars), d)
416
417SSTATETASKS += "do_populate_lic"
418do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
419do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
420
421IMAGE_CLASSES:append = " license_image"
422
423python do_populate_lic_setscene () {
424 sstate_setscene(d)
425}
426addtask do_populate_lic_setscene
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
deleted file mode 100644
index b60d6e44f4..0000000000
--- a/meta/classes/license_image.bbclass
+++ /dev/null
@@ -1,295 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses"
8
9# This requires LICENSE_CREATE_PACKAGE=1 to work too
10COMPLEMENTARY_GLOB[lic-pkgs] = "*-lic"
11
12python() {
13 if not oe.data.typed_value('LICENSE_CREATE_PACKAGE', d):
14 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
15 if 'lic-pkgs' in features:
16 bb.error("'lic-pkgs' in IMAGE_FEATURES but LICENSE_CREATE_PACKAGE not enabled to generate -lic packages")
17}
18
19python write_package_manifest() {
20 # Get list of installed packages
21 license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
22 bb.utils.mkdirhier(license_image_dir)
23 from oe.rootfs import image_list_installed_packages
24 from oe.utils import format_pkg_list
25
26 pkgs = image_list_installed_packages(d)
27 output = format_pkg_list(pkgs)
28 with open(os.path.join(license_image_dir, 'package.manifest'), "w+") as package_manifest:
29 package_manifest.write(output)
30}
31
32python license_create_manifest() {
33 import oe.packagedata
34 from oe.rootfs import image_list_installed_packages
35
36 build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
37 if build_images_from_feeds == "1":
38 return 0
39
40 pkg_dic = {}
41 for pkg in sorted(image_list_installed_packages(d)):
42 pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
43 'runtime-reverse', pkg)
44 pkg_name = os.path.basename(os.readlink(pkg_info))
45
46 pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
47 if not "LICENSE" in pkg_dic[pkg_name].keys():
48 pkg_lic_name = "LICENSE:" + pkg_name
49 pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
50
51 rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
52 d.getVar('IMAGE_NAME'), 'license.manifest')
53 write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
54}
55
56def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
57 import re
58 import stat
59
60 bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
61 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
62
63 exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
64 with open(license_manifest, "w") as license_file:
65 for pkg in sorted(pkg_dic):
66 remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
67 incompatible_licenses = incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"])
68 if incompatible_licenses:
69 bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses)))
70 else:
71 incompatible_licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
72 if incompatible_licenses:
73 oe.qa.handle_error('license-incompatible', "Including %s with incompatible license(s) %s into the image, because it has been allowed by exception list." %(pkg, ' '.join(incompatible_licenses)), d)
74 try:
75 (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
76 oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
77 remaining_bad_licenses, canonical_license, d)
78 except oe.license.LicenseError as exc:
79 bb.fatal('%s: %s' % (d.getVar('P'), exc))
80
81 if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
82 # Rootfs manifest
83 license_file.write("PACKAGE NAME: %s\n" % pkg)
84 license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
85 license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
86 license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
87
88 # If the package doesn't contain any files, that is, its size is 0, the license
89 # isn't relevant as far as the final image is concerned, so doing a license
90 # check doesn't make much sense; skip it.
91 if pkg_dic[pkg]["PKGSIZE:%s" % pkg] == "0":
92 continue
93 else:
94 # Image manifest
95 license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
96 license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
97 license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
98 license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
99
100 for lic in pkg_dic[pkg]["LICENSES"]:
101 lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
102 pkg_dic[pkg]["PN"], "generic_%s" %
103 re.sub(r'\+', '', lic))
104 # explicitly skip the CLOSED license because it isn't generic
105 if lic == "CLOSED":
106 continue
107
108 if not os.path.exists(lic_file):
109 oe.qa.handle_error('license-file-missing',
110 "The license listed %s was not in the "\
111 "licenses collected for recipe %s"
112 % (lic, pkg_dic[pkg]["PN"]), d)
113 oe.qa.exit_if_errors(d)
114
115 # Two options here:
116 # - Just copy the manifest
117 # - Copy the manifest and the license directories
118 # With both options set we see a 0.5 MB increase in core-image-minimal
119 copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
120 copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
121 if rootfs and copy_lic_manifest == "1":
122 rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR')
123 bb.utils.mkdirhier(rootfs_license_dir)
124 rootfs_license_manifest = os.path.join(rootfs_license_dir,
125 os.path.split(license_manifest)[1])
126 if not os.path.exists(rootfs_license_manifest):
127 oe.path.copyhardlink(license_manifest, rootfs_license_manifest)
128
129 if copy_lic_dirs == "1":
130 for pkg in sorted(pkg_dic):
131 pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
132 bb.utils.mkdirhier(pkg_rootfs_license_dir)
133 pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
134 pkg_dic[pkg]["PN"])
135
136 pkg_manifest_licenses = [canonical_license(d, lic) \
137 for lic in pkg_dic[pkg]["LICENSES"]]
138
139 licenses = os.listdir(pkg_license_dir)
140 for lic in licenses:
141 pkg_license = os.path.join(pkg_license_dir, lic)
142 pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
143
144 if re.match(r"^generic_.*$", lic):
145 generic_lic = canonical_license(d,
146 re.search(r"^generic_(.*)$", lic).group(1))
147
148 # Do not copy a generic license into the package if it
149 # isn't declared in the package's LICENSES.
150 if not re.sub(r'\+$', '', generic_lic) in \
151 [re.sub(r'\+', '', lic) for lic in \
152 pkg_manifest_licenses]:
153 continue
154
155 if oe.license.license_ok(generic_lic,
156 bad_licenses) == False:
157 continue
158
159 # Make sure we use only canonical name for the license file
160 generic_lic_file = "generic_%s" % generic_lic
161 rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file)
162 if not os.path.exists(rootfs_license):
163 oe.path.copyhardlink(pkg_license, rootfs_license)
164
165 if not os.path.exists(pkg_rootfs_license):
166 os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license)
167 else:
168 if (oe.license.license_ok(canonical_license(d,
169 lic), bad_licenses) == False or
170 os.path.exists(pkg_rootfs_license)):
171 continue
172
173 oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
174 # Fixup file ownership and permissions
175 for walkroot, dirs, files in os.walk(rootfs_license_dir):
176 for f in files:
177 p = os.path.join(walkroot, f)
178 os.lchown(p, 0, 0)
179 if not os.path.islink(p):
180 os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
181 for dir in dirs:
182 p = os.path.join(walkroot, dir)
183 os.lchown(p, 0, 0)
184 os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
185
186
187
188def license_deployed_manifest(d):
189 """
190 Write the license manifest for the deployed recipes.
191 The deployed recipes usually include the bootloader
192 and extra files to boot the target.
193 """
194
195 dep_dic = {}
196 man_dic = {}
197 lic_dir = d.getVar("LICENSE_DIRECTORY")
198
199 dep_dic = get_deployed_dependencies(d)
200 for dep in dep_dic.keys():
201 man_dic[dep] = {}
202 # It is necessary to mark that this will be used for the image manifest
203 man_dic[dep]["IMAGE_MANIFEST"] = True
204 man_dic[dep]["PN"] = dep
205 man_dic[dep]["FILES"] = \
206 " ".join(get_deployed_files(dep_dic[dep]))
207 with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
208 for line in f.readlines():
209 key,val = line.split(": ", 1)
210 man_dic[dep][key] = val[:-1]
211
212 lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
213 d.getVar('IMAGE_NAME'))
214 bb.utils.mkdirhier(lic_manifest_dir)
215 image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
216 write_license_files(d, image_license_manifest, man_dic, rootfs=False)
217
218 link_name = d.getVar('IMAGE_LINK_NAME')
219 if link_name:
220 lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
221 link_name)
222 # remove old symlink
223 if os.path.islink(lic_manifest_symlink_dir):
224 os.unlink(lic_manifest_symlink_dir)
225
226 # create the image dir symlink
227 if lic_manifest_dir != lic_manifest_symlink_dir:
228 os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
229
230def get_deployed_dependencies(d):
231 """
232 Get all the deployed dependencies of an image
233 """
234
235 deploy = {}
236 # Get all the dependencies for the current task (rootfs).
237 taskdata = d.getVar("BB_TASKDEPDATA", False)
238 pn = d.getVar("PN", True)
239 depends = list(set([dep[0] for dep
240 in list(taskdata.values())
241 if not dep[0].endswith("-native") and not dep[0] == pn]))
242
243 # To verify what was deployed, it checks the rootfs dependencies against
244 # the SSTATE_MANIFESTS for the "deploy" task.
245 # The manifest file name contains the arch. Because we are not running
246 # in the recipe context it is necessary to check every arch used.
247 sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
248 archs = list(set(d.getVar("SSTATE_ARCHS").split()))
249 for dep in depends:
250 for arch in archs:
251 sstate_manifest_file = os.path.join(sstate_manifest_dir,
252 "manifest-%s-%s.deploy" % (arch, dep))
253 if os.path.exists(sstate_manifest_file):
254 deploy[dep] = sstate_manifest_file
255 break
256
257 return deploy
258get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
259
260def get_deployed_files(man_file):
261 """
262 Get the files deployed from the sstate manifest
263 """
264
265 dep_files = []
266 excluded_files = []
267 with open(man_file, "r") as manifest:
268 all_files = manifest.read()
269 for f in all_files.splitlines():
270 if ((not (os.path.islink(f) or os.path.isdir(f))) and
271 not os.path.basename(f) in excluded_files):
272 dep_files.append(os.path.basename(f))
273 return dep_files
274
275ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest; license_create_manifest; "
276do_rootfs[recrdeptask] += "do_populate_lic"
277
278python do_populate_lic_deploy() {
279 license_deployed_manifest(d)
280 oe.qa.exit_if_errors(d)
281}
282
283addtask populate_lic_deploy before do_build after do_image_complete
284do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
285
286python license_qa_dead_symlink() {
287 import os
288
289 for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')):
290 for file in files:
291 full_path = root + "/" + file
292 if os.path.islink(full_path) and not os.path.exists(full_path):
293 bb.error("broken symlink: " + full_path)
294}
295IMAGE_QA_COMMANDS += "license_qa_dead_symlink"
diff --git a/meta/classes/linux-dummy.bbclass b/meta/classes/linux-dummy.bbclass
deleted file mode 100644
index 9291533cf9..0000000000
--- a/meta/classes/linux-dummy.bbclass
+++ /dev/null
@@ -1,31 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python __anonymous () {
8 if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy':
9 # code copied in part from kernel.bbclass
10 kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
11
12 # set an empty package of kernel-devicetree
13 d.appendVar('PACKAGES', ' %s-devicetree' % kname)
14 d.setVar('ALLOW_EMPTY:%s-devicetree' % kname, '1')
15
16 # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
17 type = d.getVar('KERNEL_IMAGETYPE') or ""
18 alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
19 types = d.getVar('KERNEL_IMAGETYPES') or ""
20 if type not in types.split():
21 types = (type + ' ' + types).strip()
22 if alttype not in types.split():
23 types = (alttype + ' ' + types).strip()
24
25 # set empty packages of kernel-image-*
26 for type in types.split():
27 typelower = type.lower()
28 d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
29 d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
30}
31
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
deleted file mode 100644
index cb2212c948..0000000000
--- a/meta/classes/linux-kernel-base.bbclass
+++ /dev/null
@@ -1,47 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# parse kernel ABI version out of <linux/version.h>
8def get_kernelversion_headers(p):
9 import re
10
11 fn = p + '/include/linux/utsrelease.h'
12 if not os.path.isfile(fn):
13 # after 2.6.33-rc1
14 fn = p + '/include/generated/utsrelease.h'
15 if not os.path.isfile(fn):
16 fn = p + '/include/linux/version.h'
17
18 try:
19 f = open(fn, 'r')
20 except IOError:
21 return None
22
23 l = f.readlines()
24 f.close()
25 r = re.compile("#define UTS_RELEASE \"(.*)\"")
26 for s in l:
27 m = r.match(s)
28 if m:
29 return m.group(1)
30 return None
31
32
33def get_kernelversion_file(p):
34 fn = p + '/kernel-abiversion'
35
36 try:
37 with open(fn, 'r') as f:
38 return f.readlines()[0].strip()
39 except IOError:
40 return None
41
42def linux_module_packages(s, d):
43 suffix = ""
44 return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
45
46# that's all
47
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
deleted file mode 100644
index 1dfb95e31d..0000000000
--- a/meta/classes/linuxloader.bbclass
+++ /dev/null
@@ -1,82 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def get_musl_loader_arch(d):
8 import re
9 ldso_arch = "NotSupported"
10
11 targetarch = d.getVar("TARGET_ARCH")
12 if targetarch.startswith("microblaze"):
13 ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}"
14 elif targetarch.startswith("mips"):
15 ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
16 elif targetarch == "powerpc":
17 ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
18 elif targetarch.startswith("powerpc64"):
19 ldso_arch = "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}"
20 elif targetarch == "x86_64":
21 ldso_arch = "x86_64"
22 elif re.search("i.86", targetarch):
23 ldso_arch = "i386"
24 elif targetarch.startswith("arm"):
25 ldso_arch = "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}"
26 elif targetarch.startswith("aarch64"):
27 ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
28 elif targetarch.startswith("riscv64"):
29 ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
30 elif targetarch.startswith("riscv32"):
31 ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
32 return ldso_arch
33
34def get_musl_loader(d):
35 import re
36 return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1"
37
38def get_glibc_loader(d):
39 import re
40
41 dynamic_loader = "NotSupported"
42 targetarch = d.getVar("TARGET_ARCH")
43 if targetarch in ["powerpc", "microblaze"]:
44 dynamic_loader = "${base_libdir}/ld.so.1"
45 elif targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]:
46 dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
47 elif targetarch.startswith("mips"):
48 dynamic_loader = "${base_libdir}/ld.so.1"
49 elif targetarch == "powerpc64le":
50 dynamic_loader = "${base_libdir}/ld64.so.2"
51 elif targetarch == "powerpc64":
52 dynamic_loader = "${base_libdir}/ld64.so.1"
53 elif targetarch == "x86_64":
54 dynamic_loader = "${base_libdir}/ld-linux-x86-64.so.2"
55 elif re.search("i.86", targetarch):
56 dynamic_loader = "${base_libdir}/ld-linux.so.2"
57 elif targetarch == "arm":
58 dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3"
59 elif targetarch.startswith("aarch64"):
60 dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
61 elif targetarch.startswith("riscv64"):
62 dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
63 elif targetarch.startswith("riscv32"):
64 dynamic_loader = "${base_libdir}/ld-linux-riscv32-ilp32${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
65 return dynamic_loader
66
67def get_linuxloader(d):
68 overrides = d.getVar("OVERRIDES").split(":")
69
70 if "libc-baremetal" in overrides:
71 return "NotSupported"
72
73 if "libc-musl" in overrides:
74 dynamic_loader = get_musl_loader(d)
75 else:
76 dynamic_loader = get_glibc_loader(d)
77 return dynamic_loader
78
79get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
80get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
81get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}"
82get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
diff --git a/meta/classes/live-vm-common.bbclass b/meta/classes/live-vm-common.bbclass
deleted file mode 100644
index b619f3a4be..0000000000
--- a/meta/classes/live-vm-common.bbclass
+++ /dev/null
@@ -1,100 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Some of the vars for the vm and live images conflict; this function
8# is used for fixing the problem.
9def set_live_vm_vars(d, suffix):
10 vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
11 for var in vars:
12 var_with_suffix = var + '_' + suffix
13 if d.getVar(var):
14 bb.warn('Found potentially conflicting var %s, please use %s rather than %s' % \
15 (var, var_with_suffix, var))
16 elif d.getVar(var_with_suffix):
17 d.setVar(var, d.getVar(var_with_suffix))
18
19
20EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
21EFI_PROVIDER ?= "grub-efi"
22EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
23
24MKDOSFS_EXTRAOPTS ??= "-S 512"
25
26# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
27# contain "efi". This way legacy is supported by default if neither is
28# specified, maintaining the original behavior.
29def pcbios(d):
30 pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
31 if pcbios == "0":
32 pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
33 return pcbios
34
35PCBIOS = "${@pcbios(d)}"
36PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
37
38# efi_populate_common DEST BOOTLOADER
39efi_populate_common() {
40 # DEST must be the root of the image so that EFIDIR is not
41 # nested under a top level directory.
42 DEST=$1
43
44 install -d ${DEST}${EFIDIR}
45
46 install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
47 EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
48 printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
49}
50
51efi_iso_populate() {
52 iso_dir=$1
53 efi_populate $iso_dir
54 # Build an EFI directory to create efi.img
55 mkdir -p ${EFIIMGDIR}/${EFIDIR}
56 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
57 cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
58
59 EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
60 printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
61
62 if [ -f "$iso_dir/initrd" ] ; then
63 cp $iso_dir/initrd ${EFIIMGDIR}
64 fi
65}
66
67efi_hddimg_populate() {
68 efi_populate $1
69}
70
71inherit ${EFI_CLASS}
72inherit ${PCBIOS_CLASS}
73
74populate_kernel() {
75 dest=$1
76 install -d $dest
77
78 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
79 bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}"
80 if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
81 install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE}
82 else
83 bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist"
84 fi
85
86 # initrd is made by concatenating multiple filesystem images
87 if [ -n "${INITRD}" ]; then
88 rm -f $dest/initrd
89 for fs in ${INITRD}
90 do
91 if [ -s "$fs" ]; then
92 cat $fs >> $dest/initrd
93 else
94 bbfatal "$fs is invalid. initrd image creation failed."
95 fi
96 done
97 chmod 0644 $dest/initrd
98 fi
99}
100
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
deleted file mode 100644
index ce03abfe42..0000000000
--- a/meta/classes/logging.bbclass
+++ /dev/null
@@ -1,107 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The following logging mechanisms are to be used in bash functions of recipes.
8# They are intended to map one to one in intention and output format with the
9# python recipe logging functions of a similar naming convention: bb.plain(),
10# bb.note(), etc.
11
12LOGFIFO = "${T}/fifo.${@os.getpid()}"
13
14# Print the output exactly as it is passed in. Typically used for output of
15# tasks that should be seen on the console. Use sparingly.
16# Output: logs console
17bbplain() {
18 if [ -p ${LOGFIFO} ] ; then
19 printf "%b\0" "bbplain $*" > ${LOGFIFO}
20 else
21 echo "$*"
22 fi
23}
24
25# Notify the user of a noteworthy condition.
26# Output: logs
27bbnote() {
28 if [ -p ${LOGFIFO} ] ; then
29 printf "%b\0" "bbnote $*" > ${LOGFIFO}
30 else
31 echo "NOTE: $*"
32 fi
33}
34
35# Print a warning to the log. Warnings are non-fatal, and do not
36# indicate a build failure.
37# Output: logs console
38bbwarn() {
39 if [ -p ${LOGFIFO} ] ; then
40 printf "%b\0" "bbwarn $*" > ${LOGFIFO}
41 else
42 echo "WARNING: $*"
43 fi
44}
45
46# Print an error to the log. Errors are non-fatal in that the build can
47# continue, but they do indicate a build failure.
48# Output: logs console
49bberror() {
50 if [ -p ${LOGFIFO} ] ; then
51 printf "%b\0" "bberror $*" > ${LOGFIFO}
52 else
53 echo "ERROR: $*"
54 fi
55}
56
57# Print a fatal error to the log. Fatal errors indicate build failure
58# and halt the build, exiting with an error code.
59# Output: logs console
60bbfatal() {
61 if [ -p ${LOGFIFO} ] ; then
62 printf "%b\0" "bbfatal $*" > ${LOGFIFO}
63 else
64 echo "ERROR: $*"
65 fi
66 exit 1
67}
68
69# Like bbfatal, except prevents the suppression of the error log by
70# bitbake's UI.
71# Output: logs console
72bbfatal_log() {
73 if [ -p ${LOGFIFO} ] ; then
74 printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
75 else
76 echo "ERROR: $*"
77 fi
78 exit 1
79}
80
81# Print debug messages. These are appropriate for progress checkpoint
82# messages to the logs. Depending on the debug log level, they may also
83# go to the console.
84# Output: logs console
85# Usage: bbdebug 1 "first level debug message"
86# bbdebug 2 "second level debug message"
87bbdebug() {
88 USAGE='Usage: bbdebug [123] "message"'
89 if [ $# -lt 2 ]; then
90 bbfatal "$USAGE"
91 fi
92
93 # Strip off the debug level and ensure it is an integer
94 DBGLVL=$1; shift
95 NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
96 if [ "$NONDIGITS" ]; then
97 bbfatal "$USAGE"
98 fi
99
100 # All debug output is printed to the logs
101 if [ -p ${LOGFIFO} ] ; then
102 printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
103 else
104 echo "DEBUG: $*"
105 fi
106}
107
diff --git a/meta/classes/manpages.bbclass b/meta/classes/manpages.bbclass
deleted file mode 100644
index 693fb53671..0000000000
--- a/meta/classes/manpages.bbclass
+++ /dev/null
@@ -1,51 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Inherit this class to enable or disable building and installation of manpages
8# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
9# tends to pull in the entire XML stack and other tools, so it's not enabled
10# by default.
11PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
12
13inherit qemu
14
15# manual files are usually packaged in ${PN}-doc, except for man-pages
16MAN_PKG ?= "${PN}-doc"
17
18# only add man-db to RDEPENDS when manual files are built and installed
19RDEPENDS:${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
20
21pkg_postinst:${MAN_PKG}:append () {
22 # only update manual page index caches when manual files are built and installed
23 if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
24 if test -n "$D"; then
25 if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
26 sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
27 chown -R root:root $D${mandir}
28
29 mkdir -p $D${localstatedir}/cache/man
30 cd $D${mandir}
31 find . -name index.db | while read index; do
32 mkdir -p $D${localstatedir}/cache/man/$(dirname ${index})
33 mv ${index} $D${localstatedir}/cache/man/${index}
34 chown man:man $D${localstatedir}/cache/man/${index}
35 done
36 cd -
37 else
38 $INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
39 fi
40 else
41 mandb -q
42 fi
43 fi
44}
45
46pkg_postrm:${MAN_PKG}:append () {
47 # only update manual page index caches when manual files are built and installed
48 if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
49 mandb -q
50 fi
51}
diff --git a/meta/classes/meson-routines.bbclass b/meta/classes/meson-routines.bbclass
deleted file mode 100644
index 6086fce9d9..0000000000
--- a/meta/classes/meson-routines.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit siteinfo
8
9def meson_array(var, d):
10 items = d.getVar(var).split()
11 return repr(items[0] if len(items) == 1 else items)
12
13# Map our ARCH values to what Meson expects:
14# http://mesonbuild.com/Reference-tables.html#cpu-families
15def meson_cpu_family(var, d):
16 import re
17 arch = d.getVar(var)
18 if arch == 'powerpc':
19 return 'ppc'
20 elif arch == 'powerpc64' or arch == 'powerpc64le':
21 return 'ppc64'
22 elif arch == 'armeb':
23 return 'arm'
24 elif arch == 'aarch64_be':
25 return 'aarch64'
26 elif arch == 'mipsel':
27 return 'mips'
28 elif arch == 'mips64el':
29 return 'mips64'
30 elif re.match(r"i[3-6]86", arch):
31 return "x86"
32 elif arch == "microblazeel":
33 return "microblaze"
34 else:
35 return arch
36
37# Map our OS values to what Meson expects:
38# https://mesonbuild.com/Reference-tables.html#operating-system-names
39def meson_operating_system(var, d):
40 os = d.getVar(var)
41 if "mingw" in os:
42 return "windows"
43 # avoid e.g. 'linux-gnueabi'
44 elif "linux" in os:
45 return "linux"
46 else:
47 return os
48
49def meson_endian(prefix, d):
50 arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
51 sitedata = siteinfo_data_for_machine(arch, os, d)
52 if "endian-little" in sitedata:
53 return "little"
54 elif "endian-big" in sitedata:
55 return "big"
56 else:
57 bb.fatal("Cannot determine endianness for %s-%s" % (arch, os))
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
deleted file mode 100644
index 765e81bc4f..0000000000
--- a/meta/classes/meson.bbclass
+++ /dev/null
@@ -1,179 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native meson-routines qemu
8
9DEPENDS:append = " meson-native ninja-native"
10
11EXEWRAPPER_ENABLED:class-native = "False"
12EXEWRAPPER_ENABLED:class-nativesdk = "False"
13EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}"
14DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}"
15
16# As Meson enforces out-of-tree builds we can just use cleandirs
17B = "${WORKDIR}/build"
18do_configure[cleandirs] = "${B}"
19
20# Where the meson.build build configuration is
21MESON_SOURCEPATH = "${S}"
22
23def noprefix(var, d):
24 return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
25
26MESON_BUILDTYPE ?= "${@oe.utils.vartrue('DEBUG_BUILD', 'debug', 'plain', d)}"
27MESON_BUILDTYPE[vardeps] += "DEBUG_BUILD"
28MESONOPTS = " --prefix ${prefix} \
29 --buildtype ${MESON_BUILDTYPE} \
30 --bindir ${@noprefix('bindir', d)} \
31 --sbindir ${@noprefix('sbindir', d)} \
32 --datadir ${@noprefix('datadir', d)} \
33 --libdir ${@noprefix('libdir', d)} \
34 --libexecdir ${@noprefix('libexecdir', d)} \
35 --includedir ${@noprefix('includedir', d)} \
36 --mandir ${@noprefix('mandir', d)} \
37 --infodir ${@noprefix('infodir', d)} \
38 --sysconfdir ${sysconfdir} \
39 --localstatedir ${localstatedir} \
40 --sharedstatedir ${sharedstatedir} \
41 --wrap-mode nodownload \
42 --native-file ${WORKDIR}/meson.native"
43
44EXTRA_OEMESON:append = " ${PACKAGECONFIG_CONFARGS}"
45
46MESON_CROSS_FILE = ""
47MESON_CROSS_FILE:class-target = "--cross-file ${WORKDIR}/meson.cross"
48MESON_CROSS_FILE:class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
49
50# Needed to set up qemu wrapper below
51export STAGING_DIR_HOST
52
53def rust_tool(d, target_var):
54 rustc = d.getVar('RUSTC')
55 if not rustc:
56 return ""
57 cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split()
58 return "rust = %s" % repr(cmd)
59
60addtask write_config before do_configure
61do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS"
62do_write_config() {
63 # This needs to be Python to split the args into single-element lists
64 cat >${WORKDIR}/meson.cross <<EOF
65[binaries]
66c = ${@meson_array('CC', d)}
67cpp = ${@meson_array('CXX', d)}
68cython = 'cython3'
69ar = ${@meson_array('AR', d)}
70nm = ${@meson_array('NM', d)}
71strip = ${@meson_array('STRIP', d)}
72readelf = ${@meson_array('READELF', d)}
73objcopy = ${@meson_array('OBJCOPY', d)}
74pkgconfig = 'pkg-config'
75llvm-config = 'llvm-config${LLVMVERSION}'
76cups-config = 'cups-config'
77g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
78g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
79${@rust_tool(d, "HOST_SYS")}
80${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""}
81
82[built-in options]
83c_args = ${@meson_array('CFLAGS', d)}
84c_link_args = ${@meson_array('LDFLAGS', d)}
85cpp_args = ${@meson_array('CXXFLAGS', d)}
86cpp_link_args = ${@meson_array('LDFLAGS', d)}
87
88[properties]
89needs_exe_wrapper = true
90
91[host_machine]
92system = '${@meson_operating_system('HOST_OS', d)}'
93cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
94cpu = '${HOST_ARCH}'
95endian = '${@meson_endian('HOST', d)}'
96
97[target_machine]
98system = '${@meson_operating_system('TARGET_OS', d)}'
99cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
100cpu = '${TARGET_ARCH}'
101endian = '${@meson_endian('TARGET', d)}'
102EOF
103
104 cat >${WORKDIR}/meson.native <<EOF
105[binaries]
106c = ${@meson_array('BUILD_CC', d)}
107cpp = ${@meson_array('BUILD_CXX', d)}
108cython = 'cython3'
109ar = ${@meson_array('BUILD_AR', d)}
110nm = ${@meson_array('BUILD_NM', d)}
111strip = ${@meson_array('BUILD_STRIP', d)}
112readelf = ${@meson_array('BUILD_READELF', d)}
113objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
114pkgconfig = 'pkg-config-native'
115${@rust_tool(d, "BUILD_SYS")}
116
117[built-in options]
118c_args = ${@meson_array('BUILD_CFLAGS', d)}
119c_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
120cpp_args = ${@meson_array('BUILD_CXXFLAGS', d)}
121cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
122EOF
123}
124
125do_write_config:append:class-target() {
126 # Write out a qemu wrapper that will be used as exe_wrapper so that meson
127 # can run target helper binaries through that.
128 qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
129 cat > ${WORKDIR}/meson-qemuwrapper << EOF
130#!/bin/sh
131# Use a modules directory which doesn't exist so we don't load random things
132# which may then get deleted (or their dependencies) and potentially segfault
133export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
134
135# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
136unset LD_LIBRARY_PATH
137
138$qemu_binary "\$@"
139EOF
140 chmod +x ${WORKDIR}/meson-qemuwrapper
141}
142
143# Tell externalsrc that changes to this file require a reconfigure
144CONFIGURE_FILES = "meson.build"
145
146meson_do_configure() {
147 # Meson requires this to be 'bfd', 'lld' or 'gold' from 0.53 onwards
148 # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
149 unset LD
150
151 # Work around "Meson fails if /tmp is mounted with noexec #2972"
152 mkdir -p "${B}/meson-private/tmp"
153 export TMPDIR="${B}/meson-private/tmp"
154 bbnote Executing meson ${EXTRA_OEMESON}...
155 if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
156 bbfatal_log meson failed
157 fi
158}
159
160python meson_do_qa_configure() {
161 import re
162 warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
163 with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile:
164 log = logfile.read()
165 for (prop, value) in warn_re.findall(log):
166 bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value))
167}
168do_configure[postfuncs] += "meson_do_qa_configure"
169
170do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
171meson_do_compile() {
172 ninja -v ${PARALLEL_MAKE}
173}
174
175meson_do_install() {
176 DESTDIR='${D}' ninja -v ${PARALLEL_MAKEINST} install
177}
178
179EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/mime-xdg.bbclass b/meta/classes/mime-xdg.bbclass
deleted file mode 100644
index cbdcb4c7e9..0000000000
--- a/meta/classes/mime-xdg.bbclass
+++ /dev/null
@@ -1,78 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6# This class creates mime <-> application associations based on entry
7# 'MimeType' in *.desktop files
8#
9
10DEPENDS += "desktop-file-utils"
11PACKAGE_WRITE_DEPS += "desktop-file-utils-native"
12DESKTOPDIR = "${datadir}/applications"
13
14# There are recipes out there installing their .desktop files as absolute
15# symlinks. For us these are dangling and cannot be introspected for "MimeType"
16# easily. By adding package names to MIME_XDG_PACKAGES, the packager can force
17# proper update-desktop-database handling. Note that all introspection is
18# skipped when MIME_XDG_PACKAGES is not empty
19MIME_XDG_PACKAGES ?= ""
20
21mime_xdg_postinst() {
22if [ "x$D" != "x" ]; then
23 $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
24 mlprefix=${MLPREFIX} \
25 desktop_dir=${DESKTOPDIR}
26else
27 update-desktop-database $D${DESKTOPDIR}
28fi
29}
30
31mime_xdg_postrm() {
32if [ "x$D" != "x" ]; then
33 $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
34 mlprefix=${MLPREFIX} \
35 desktop_dir=${DESKTOPDIR}
36else
37 update-desktop-database $D${DESKTOPDIR}
38fi
39}
40
41python populate_packages:append () {
42 packages = d.getVar('PACKAGES').split()
43 pkgdest = d.getVar('PKGDEST')
44 desktop_base = d.getVar('DESKTOPDIR')
45 forced_mime_xdg_pkgs = (d.getVar('MIME_XDG_PACKAGES') or '').split()
46
47 for pkg in packages:
48 desktops_with_mime_found = pkg in forced_mime_xdg_pkgs
49 if d.getVar('MIME_XDG_PACKAGES') == '':
50 desktop_dir = '%s/%s%s' % (pkgdest, pkg, desktop_base)
51 if os.path.exists(desktop_dir):
52 for df in os.listdir(desktop_dir):
53 if df.endswith('.desktop'):
54 try:
55 with open(desktop_dir + '/'+ df, 'r') as f:
56 for line in f.read().split('\n'):
57 if 'MimeType' in line:
58 desktops_with_mime_found = True
59 break
60 except:
61 bb.warn('Could not open %s. Set MIME_XDG_PACKAGES in recipe or add mime-xdg to INSANE_SKIP.' % (desktop_dir + '/' + df))
62 if desktops_with_mime_found:
63 break
64 if desktops_with_mime_found:
65 bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
66 postinst = d.getVar('pkg_postinst:%s' % pkg)
67 if not postinst:
68 postinst = '#!/bin/sh\n'
69 postinst += d.getVar('mime_xdg_postinst')
70 d.setVar('pkg_postinst:%s' % pkg, postinst)
71 postrm = d.getVar('pkg_postrm:%s' % pkg)
72 if not postrm:
73 postrm = '#!/bin/sh\n'
74 postrm += d.getVar('mime_xdg_postrm')
75 d.setVar('pkg_postrm:%s' % pkg, postrm)
76 bb.note("adding desktop-file-utils dependency to %s" % pkg)
77 d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
78}
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
deleted file mode 100644
index 9b13f62bda..0000000000
--- a/meta/classes/mime.bbclass
+++ /dev/null
@@ -1,76 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class is used by recipes installing mime types
9#
10
11DEPENDS += "${@bb.utils.contains('BPN', 'shared-mime-info', '', 'shared-mime-info', d)}"
12PACKAGE_WRITE_DEPS += "shared-mime-info-native"
13MIMEDIR = "${datadir}/mime"
14
15mime_postinst() {
16if [ "x$D" != "x" ]; then
17 $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
18 mlprefix=${MLPREFIX} \
19 mimedir=${MIMEDIR}
20else
21 echo "Updating MIME database... this may take a while."
22 update-mime-database $D${MIMEDIR}
23fi
24}
25
26mime_postrm() {
27if [ "x$D" != "x" ]; then
28 $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
29 mlprefix=${MLPREFIX} \
30 mimedir=${MIMEDIR}
31else
32 echo "Updating MIME database... this may take a while."
33 # $D${MIMEDIR}/packages belongs to the package shared-mime-info-data,
34 # and packages like libfm-mime depend on shared-mime-info-data.
35 # After shared-mime-info-data is uninstalled, $D${MIMEDIR}/packages
36 # is removed, but update-mime-database needs this dir to update the
37 # database; as a workaround, create one and remove it later
38 if [ ! -d $D${MIMEDIR}/packages ]; then
39 mkdir -p $D${MIMEDIR}/packages
40 update-mime-database $D${MIMEDIR}
41 rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages
42 else
43 update-mime-database $D${MIMEDIR}
44 fi
45fi
46}
47
48python populate_packages:append () {
49 packages = d.getVar('PACKAGES').split()
50 pkgdest = d.getVar('PKGDEST')
51 mimedir = d.getVar('MIMEDIR')
52
53 for pkg in packages:
54 mime_packages_dir = '%s/%s%s/packages' % (pkgdest, pkg, mimedir)
55 mimes_types_found = False
56 if os.path.exists(mime_packages_dir):
57 for f in os.listdir(mime_packages_dir):
58 if f.endswith('.xml'):
59 mimes_types_found = True
60 break
61 if mimes_types_found:
62 bb.note("adding mime postinst and postrm scripts to %s" % pkg)
63 postinst = d.getVar('pkg_postinst:%s' % pkg)
64 if not postinst:
65 postinst = '#!/bin/sh\n'
66 postinst += d.getVar('mime_postinst')
67 d.setVar('pkg_postinst:%s' % pkg, postinst)
68 postrm = d.getVar('pkg_postrm:%s' % pkg)
69 if not postrm:
70 postrm = '#!/bin/sh\n'
71 postrm += d.getVar('mime_postrm')
72 d.setVar('pkg_postrm:%s' % pkg, postrm)
73 if pkg != 'shared-mime-info-data':
74 bb.note("adding shared-mime-info-data dependency to %s" % pkg)
75 d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
76}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
deleted file mode 100644
index 9643b31a23..0000000000
--- a/meta/classes/mirrors.bbclass
+++ /dev/null
@@ -1,95 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7MIRRORS += "\
8${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
9${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
10${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
11${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
12${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \
13${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \
14${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \
15${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \
16${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \
17${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \
18${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \
19${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \
20${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \
21${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \
22${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \
23${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \
24${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \
25${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \
26${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \
27${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \
28${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \
29${GNU_MIRROR} https://mirrors.kernel.org/gnu \
30${KERNELORG_MIRROR} http://www.kernel.org/pub \
31${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
32${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
33${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
34ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
35ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
36ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
37ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
38http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
39http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
40${APACHE_MIRROR} http://www.us.apache.org/dist \
41${APACHE_MIRROR} http://archive.apache.org/dist \
42http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
43${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
44${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
45ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \
46ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
47ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
48cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
49svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
50git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
51gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
52hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
53bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
54p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
55osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
56https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
57ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
58npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
59cvs://.*/.* http://sources.openembedded.org/ \
60svn://.*/.* http://sources.openembedded.org/ \
61git://.*/.* http://sources.openembedded.org/ \
62gitsm://.*/.* http://sources.openembedded.org/ \
63hg://.*/.* http://sources.openembedded.org/ \
64bzr://.*/.* http://sources.openembedded.org/ \
65p4://.*/.* http://sources.openembedded.org/ \
66osc://.*/.* http://sources.openembedded.org/ \
67https?://.*/.* http://sources.openembedded.org/ \
68ftp://.*/.* http://sources.openembedded.org/ \
69npm://.*/?.* http://sources.openembedded.org/ \
70${CPAN_MIRROR} http://cpan.metacpan.org/ \
71${CPAN_MIRROR} http://search.cpan.org/CPAN/ \
72https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
73https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
74"
75
76# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
77# where git native protocol fetches may fail due to local firewall rules, etc.
78
79MIRRORS += "\
80git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
81git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
82git://.*/.* git://HOST/PATH;protocol=https \
83git://.*/.* git://HOST/git/PATH;protocol=https \
84"
85
86# Switch glibc and binutils recipes to use shallow clones as they're large; this
87# improves the user experience whilst keeping the flexibility of git URLs in the recipes
88BB_GIT_SHALLOW:pn-binutils = "1"
89BB_GIT_SHALLOW:pn-binutils-cross-${TARGET_ARCH} = "1"
90BB_GIT_SHALLOW:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "1"
91BB_GIT_SHALLOW:pn-binutils-cross-testsuite = "1"
92BB_GIT_SHALLOW:pn-binutils-crosssdk-${SDK_SYS} = "1"
93BB_GIT_SHALLOW:pn-glibc = "1"
94PREMIRRORS += "git://sourceware.org/git/glibc.git https://downloads.yoctoproject.org/mirror/sources/ \
95 git://sourceware.org/git/binutils-gdb.git https://downloads.yoctoproject.org/mirror/sources/"
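
The MIRRORS value above is a whitespace-separated list of (source-regex,
mirror) pairs. As a rough standalone illustration of the matching idea only
(the real fetcher additionally rewrites the URL, e.g. substituting HOST and
PATH in the git fallbacks above; the helper name is illustrative):

    import re

    def candidate_mirrors(url, mirrors):
        # mirrors: list of (pattern, replacement) pairs as in MIRRORS above.
        # Return the mirror prefixes whose pattern matches the source URL.
        return [repl for pattern, repl in mirrors if re.match(pattern, url)]

    # candidate_mirrors("git://example.com/repo.git",
    #     [(r"git://.*/.*", "http://downloads.yoctoproject.org/mirror/sources/")])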
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
deleted file mode 100644
index 094b563b1a..0000000000
--- a/meta/classes/module-base.bbclass
+++ /dev/null
@@ -1,27 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-arch
8
9# We do the dependency this way because the output is not preserved
10# in sstate, so we must force do_compile to run (once).
11do_configure[depends] += "make-mod-scripts:do_compile"
12
13export OS = "${TARGET_OS}"
14export CROSS_COMPILE = "${TARGET_PREFIX}"
15
16# This points to the build artefacts from the main kernel build
17# such as .config and System.map.
18# Confusingly, it is not the module build output (which is ${B}), but
19# we didn't pick the name.
20export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
21
22export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
23KERNEL_OBJECT_SUFFIX = ".ko"
24
25# kernel modules are generally machine specific
26PACKAGE_ARCH = "${MACHINE_ARCH}"
27
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
deleted file mode 100644
index d52d5e3098..0000000000
--- a/meta/classes/module.bbclass
+++ /dev/null
@@ -1,80 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit module-base kernel-module-split pkgconfig
8
9EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
10
11MODULES_INSTALL_TARGET ?= "modules_install"
12MODULES_MODULE_SYMVERS_LOCATION ?= ""
13
14python __anonymous () {
15 depends = d.getVar('DEPENDS')
16 extra_symbols = []
17 for dep in depends.split():
18 if dep.startswith("kernel-module-"):
19 extra_symbols.append("${STAGING_INCDIR}/" + dep + "/Module.symvers")
20 d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
21}
22
23python do_devshell:prepend () {
24 os.environ['CFLAGS'] = ''
25 os.environ['CPPFLAGS'] = ''
26 os.environ['CXXFLAGS'] = ''
27 os.environ['LDFLAGS'] = ''
28
29 os.environ['KERNEL_PATH'] = d.getVar('STAGING_KERNEL_DIR')
30 os.environ['KERNEL_SRC'] = d.getVar('STAGING_KERNEL_DIR')
31 os.environ['KERNEL_VERSION'] = d.getVar('KERNEL_VERSION')
32 os.environ['CC'] = d.getVar('KERNEL_CC')
33 os.environ['LD'] = d.getVar('KERNEL_LD')
34 os.environ['AR'] = d.getVar('KERNEL_AR')
35 os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR')
36 kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS')
37 if kbuild_extra_symbols:
38 os.environ['KBUILD_EXTRA_SYMBOLS'] = kbuild_extra_symbols
39 else:
40 os.environ['KBUILD_EXTRA_SYMBOLS'] = ''
41}
42
43module_do_compile() {
44 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
45 oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
46 KERNEL_VERSION=${KERNEL_VERSION} \
47 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
48 AR="${KERNEL_AR}" \
49 O=${STAGING_KERNEL_BUILDDIR} \
50 KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \
51 ${MAKE_TARGETS}
52}
53
54module_do_install() {
55 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
56 oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
57 INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \
58 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
59 O=${STAGING_KERNEL_BUILDDIR} \
60 ${MODULES_INSTALL_TARGET}
61
62 if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then
63 bbwarn "Module.symvers not found in ${B}/${MODULES_MODULE_SYMVERS_LOCATION}"
64 bbwarn "Please consider setting MODULES_MODULE_SYMVERS_LOCATION to a"
65 bbwarn "directory below B to get correct inter-module dependencies"
66 else
67 install -Dm0644 "${B}/${MODULES_MODULE_SYMVERS_LOCATION}"/Module.symvers ${D}${includedir}/${BPN}/Module.symvers
68 # Module.symvers contains absolute path to the build directory.
69 # While it doesn't actually seem to matter which path is specified,
70 # clear them out to avoid confusion
71 sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
72 fi
73}
74
75EXPORT_FUNCTIONS do_compile do_install
76
77# add all split modules to PN RDEPENDS; PN can be empty now
78KERNEL_MODULES_META_PACKAGE = "${PN}"
79FILES:${PN} = ""
80ALLOW_EMPTY:${PN} = "1"
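
The __anonymous function above derives KBUILD_EXTRA_SYMBOLS from DEPENDS; the
same mapping as a standalone sketch (the staging include path argument stands
in for ${STAGING_INCDIR}):

    def extra_symbols(depends, staging_incdir):
        # Each kernel-module-* dependency stages its Module.symvers under
        # ${STAGING_INCDIR}/<dep>/, and kbuild reads these files for
        # inter-module symbol versions.
        return ["%s/%s/Module.symvers" % (staging_incdir, dep)
                for dep in depends.split()
                if dep.startswith("kernel-module-")]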
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
deleted file mode 100644
index 33f7e027f0..0000000000
--- a/meta/classes/multilib_header.bbclass
+++ /dev/null
@@ -1,58 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit siteinfo
8
9# If applicable for the architecture, this routine will rename the header and
10# add a unique identifier to the name for the ABI/bitsize that is being used.
11# A wrapper will be generated for the architecture that knows how to call
12# all of the ABI variants for that given architecture.
13#
14oe_multilib_header() {
15
16 case ${HOST_OS} in
17 *-musl*)
18 return
19 ;;
20 *)
21 esac
22 # For MIPS: "n32" is a special case, which needs to be
23 # distinct from both 64-bit and 32-bit.
24 case ${TARGET_ARCH} in
25 mips*) case "${MIPSPKGSFX_ABI}" in
26 "-n32")
27 ident=n32
28 ;;
29 *)
30 ident=${SITEINFO_BITS}
31 ;;
32 esac
33 ;;
34 *) ident=${SITEINFO_BITS}
35 esac
36 for each_header in "$@" ; do
37 if [ ! -f "${D}/${includedir}/$each_header" ]; then
38 bberror "oe_multilib_header: Unable to find header $each_header."
39 continue
40 fi
41 stem=$(echo $each_header | sed 's#\.h$##')
42 # if mips64/n32 set ident to n32
43 mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
44
45 sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
46 done
47}
48
49# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
50# We don't need multilib headers for native builds so brute force things.
51oe_multilib_header:class-native () {
52 return
53}
54
55# Nor do we need multilib headers for nativesdk builds.
56oe_multilib_header:class-nativesdk () {
57 return
58}
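
A pure-Python sketch of the rename-and-wrap step that oe_multilib_header
performs with mv and sed (the MIPS n32 special case is omitted and the
function name is illustrative):

    import os, shutil

    def multilib_rename(includedir, header, ident, wrapper_template):
        # foo.h -> foo-<ident>.h, then regenerate foo.h from the wrapper
        # template so it dispatches to the right ABI variant.
        stem = header[:-2] if header.endswith('.h') else header
        src = os.path.join(includedir, header)
        shutil.move(src, os.path.join(includedir, '%s-%s.h' % (stem, ident)))
        with open(wrapper_template) as tpl, open(src, 'w') as out:
            out.write(tpl.read().replace('ENTER_HEADER_FILENAME_HERE', stem))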
diff --git a/meta/classes/multilib_script.bbclass b/meta/classes/multilib_script.bbclass
deleted file mode 100644
index 7011526254..0000000000
--- a/meta/classes/multilib_script.bbclass
+++ /dev/null
@@ -1,40 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Recipe needs to set MULTILIB_SCRIPTS in the form <pkgname>:<scriptname>, e.g.
9# MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/file1 ${PN}:${base_bindir}/file2"
10# to indicate which script files to process from which packages.
11#
12
13inherit update-alternatives
14
15MULTILIB_SUFFIX = "${@d.getVar('base_libdir',1).split('/')[-1]}"
16
17PACKAGE_PREPROCESS_FUNCS += "multilibscript_rename"
18
19multilibscript_rename() {
20 :
21}
22
23python () {
24 # Do nothing if multilib isn't being used
25 if not d.getVar("MULTILIB_VARIANTS"):
26 return
27 # Do nothing for native/cross
28 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
29 return
30
31 for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
32 pkg, script = entry.split(":")
33 epkg = d.expand(pkg)
34 scriptname = os.path.basename(script)
35 d.appendVar("ALTERNATIVE:" + epkg, " " + scriptname + " ")
36 d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
37 d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
38 d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
39 d.appendVar("FILES:" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
40}
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
deleted file mode 100644
index 61ad053def..0000000000
--- a/meta/classes/native.bbclass
+++ /dev/null
@@ -1,236 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# We want native packages to be relocatable
8inherit relocatable
9
10# Native packages are built indirectly via dependency,
11# no need for them to be a direct target of 'world'
12EXCLUDE_FROM_WORLD = "1"
13
14PACKAGE_ARCH = "${BUILD_ARCH}"
15
16# used by cmake class
17OECMAKE_RPATH = "${libdir}"
18OECMAKE_RPATH:class-native = "${libdir}"
19
20TARGET_ARCH = "${BUILD_ARCH}"
21TARGET_OS = "${BUILD_OS}"
22TARGET_VENDOR = "${BUILD_VENDOR}"
23TARGET_PREFIX = "${BUILD_PREFIX}"
24TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
25TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
26TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
27TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}"
28TARGET_CFLAGS = "${BUILD_CFLAGS}"
29TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
30TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
31TARGET_FPU = ""
32TUNE_FEATURES = ""
33ABIEXTENSION = ""
34
35HOST_ARCH = "${BUILD_ARCH}"
36HOST_OS = "${BUILD_OS}"
37HOST_VENDOR = "${BUILD_VENDOR}"
38HOST_PREFIX = "${BUILD_PREFIX}"
39HOST_CC_ARCH = "${BUILD_CC_ARCH}"
40HOST_LD_ARCH = "${BUILD_LD_ARCH}"
41HOST_AS_ARCH = "${BUILD_AS_ARCH}"
42
43CPPFLAGS = "${BUILD_CPPFLAGS}"
44CFLAGS = "${BUILD_CFLAGS}"
45CXXFLAGS = "${BUILD_CXXFLAGS}"
46LDFLAGS = "${BUILD_LDFLAGS}"
47
48STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
49STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
50
51# native pkg doesn't need the TOOLCHAIN_OPTIONS.
52TOOLCHAIN_OPTIONS = ""
53
54# Don't build ptest natively
55PTEST_ENABLED = "0"
56
57# Don't use site files for native builds
58export CONFIG_SITE = "${COREBASE}/meta/site/native"
59
60# set the compiler as well. It could have been set to something else
61export CC = "${BUILD_CC}"
62export CXX = "${BUILD_CXX}"
63export FC = "${BUILD_FC}"
64export CPP = "${BUILD_CPP}"
65export LD = "${BUILD_LD}"
66export CCLD = "${BUILD_CCLD}"
67export AR = "${BUILD_AR}"
68export AS = "${BUILD_AS}"
69export RANLIB = "${BUILD_RANLIB}"
70export STRIP = "${BUILD_STRIP}"
71export NM = "${BUILD_NM}"
72
73# Path prefixes
74base_prefix = "${STAGING_DIR_NATIVE}"
75prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
76exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
77
78bindir = "${STAGING_BINDIR_NATIVE}"
79sbindir = "${STAGING_SBINDIR_NATIVE}"
80base_libdir = "${STAGING_LIBDIR_NATIVE}"
81libdir = "${STAGING_LIBDIR_NATIVE}"
82includedir = "${STAGING_INCDIR_NATIVE}"
83sysconfdir = "${STAGING_ETCDIR_NATIVE}"
84datadir = "${STAGING_DATADIR_NATIVE}"
85
86baselib = "lib"
87
88export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
89
90NATIVE_PACKAGE_PATH_SUFFIX ?= ""
91bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
92sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
93base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
94libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
95libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
96
97do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
98do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
99
100# Since we actually install these in situ there is no staging prefix
101STAGING_DIR_HOST = ""
102STAGING_DIR_TARGET = ""
103PKG_CONFIG_DIR = "${libdir}/pkgconfig"
104
105EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
106PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
107PKG_CONFIG_SYSROOT_DIR = ""
108PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
109PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
110
111# we don't want libc-*libc to kick in for native recipes
112LIBCOVERRIDE = ""
113CLASSOVERRIDE = "class-native"
114MACHINEOVERRIDES = ""
115MACHINE_FEATURES = ""
116
117PATH:prepend = "${COREBASE}/scripts/native-intercept:"
118
119# This class encodes staging paths into its scripts data so it can only be
120# reused if we manipulate the paths.
121SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
122
123# No strip sysroot when DEBUG_BUILD is enabled
124INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
125
126python native_virtclass_handler () {
127 pn = e.data.getVar("PN")
128 if not pn.endswith("-native"):
129 return
130 bpn = e.data.getVar("BPN")
131
132 # Set features here to prevent appends and distro features backfill
133 # from modifying native distro features
134 features = set(d.getVar("DISTRO_FEATURES_NATIVE").split())
135 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVE"), d).split())
136 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
137
138 classextend = e.data.getVar('BBCLASSEXTEND') or ""
139 if "native" not in classextend:
140 return
141
142 def map_dependencies(varname, d, suffix = "", selfref=True):
143 if suffix:
144 varname = varname + ":" + suffix
145 deps = d.getVar(varname)
146 if not deps:
147 return
148 deps = bb.utils.explode_deps(deps)
149 newdeps = []
150 for dep in deps:
151 if dep == pn:
152 if not selfref:
153 continue
154 newdeps.append(dep)
155 elif "-cross-" in dep:
156 newdeps.append(dep.replace("-cross", "-native"))
157 elif not dep.endswith("-native"):
158 # Replace ${PN} with ${BPN} in the dependency to make sure
159 # dependencies on, e.g., ${PN}-foo become ${BPN}-foo-native
160 # rather than ${BPN}-native-foo-native.
161 newdeps.append(dep.replace(pn, bpn) + "-native")
162 else:
163 newdeps.append(dep)
164 d.setVar(varname, " ".join(newdeps), parsing=True)
165
166 map_dependencies("DEPENDS", e.data, selfref=False)
167 for pkg in e.data.getVar("PACKAGES", False).split():
168 map_dependencies("RDEPENDS", e.data, pkg)
169 map_dependencies("RRECOMMENDS", e.data, pkg)
170 map_dependencies("RSUGGESTS", e.data, pkg)
171 map_dependencies("RPROVIDES", e.data, pkg)
172 map_dependencies("RREPLACES", e.data, pkg)
173 map_dependencies("PACKAGES", e.data)
174
175 provides = e.data.getVar("PROVIDES")
176 nprovides = []
177 for prov in provides.split():
178 if prov.find(pn) != -1:
179 nprovides.append(prov)
180 elif not prov.endswith("-native"):
181 nprovides.append(prov + "-native")
182 else:
183 nprovides.append(prov)
184 e.data.setVar("PROVIDES", ' '.join(nprovides))
185
186
187}
188
189addhandler native_virtclass_handler
190native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
191
192python do_addto_recipe_sysroot () {
193 bb.build.exec_func("extend_recipe_sysroot", d)
194}
195addtask addto_recipe_sysroot after do_populate_sysroot
196do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
197
198inherit nopackages
199
200do_packagedata[stamp-extra-info] = ""
201
202USE_NLS = "no"
203
204RECIPERDEPTASK = "do_populate_sysroot"
205do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
206
207#
208# Native task outputs are directly run on the target (host) system after being
209# built. Even if the output of this recipe doesn't change, a change in one of
210# its dependencies may cause a change in the output it generates (e.g. rpm
211# output depends on the output of its dependent zstd library).
212#
213# This can cause poor interactions with hash equivalence, since this recipe's
214# output-changing dependency is "hidden" and downstream tasks only see that this
215# recipe has the same outhash and therefore is equivalent. This can result in
216# different output in different cases.
217#
218# To resolve this, unhide the output-changing dependency by adding its unihash
219# to this task's outhash calculation. Unfortunately, we don't know specifically
220# which dependencies are output-changing, so we have to add all of them.
221#
222python native_add_do_populate_sysroot_deps () {
223 current_task = "do_" + d.getVar("BB_CURRENTTASK")
224 if current_task != "do_populate_sysroot":
225 return
226
227 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
228 pn = d.getVar("PN")
229 deps = {
230 dep[0]:dep[6] for dep in taskdepdata.values() if
231 dep[1] == current_task and dep[0] != pn
232 }
233
234 d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
235}
236SSTATECREATEFUNCS += "native_add_do_populate_sysroot_deps"
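
The renaming rule applied by map_dependencies above can be summarised as a
standalone sketch (the selfref handling is simplified to always keep the
recipe's own name):

    def map_dep(dep, pn, bpn):
        # Mirror the rewriting in map_dependencies: -cross- deps become
        # -native, and anything else gains a -native suffix, with ${PN}
        # replaced by ${BPN} so ${PN}-foo maps to ${BPN}-foo-native.
        if dep == pn:
            return dep
        if "-cross-" in dep:
            return dep.replace("-cross", "-native")
        if not dep.endswith("-native"):
            return dep.replace(pn, bpn) + "-native"
        return dep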
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
deleted file mode 100644
index 08288fdb73..0000000000
--- a/meta/classes/nativesdk.bbclass
+++ /dev/null
@@ -1,124 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# SDK packages are built either explicitly by the user,
8# or indirectly via dependency. No need to be in 'world'.
9EXCLUDE_FROM_WORLD = "1"
10
11STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
12
13# libc for the SDK can be different to that of the target
14NATIVESDKLIBC ?= "libc-glibc"
15LIBCOVERRIDE = ":${NATIVESDKLIBC}"
16CLASSOVERRIDE = "class-nativesdk"
17MACHINEOVERRIDES = ""
18MACHINE_FEATURES = ""
19
20MULTILIBS = ""
21
22# we need consistent staging dir whether or not multilib is enabled
23STAGING_DIR_HOST = "${WORKDIR}/recipe-sysroot"
24STAGING_DIR_TARGET = "${WORKDIR}/recipe-sysroot"
25RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot"
26
27#
28# Update PACKAGE_ARCH and PACKAGE_ARCHS
29#
30PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
31PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
32
33#
34# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
35# binaries
36#
37DEPENDS:append = " chrpath-replacement-native"
38EXTRANATIVEPATH += "chrpath-native"
39
40PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
41
42HOST_ARCH = "${SDK_ARCH}"
43HOST_VENDOR = "${SDK_VENDOR}"
44HOST_OS = "${SDK_OS}"
45HOST_PREFIX = "${SDK_PREFIX}"
46HOST_CC_ARCH = "${SDK_CC_ARCH}"
47HOST_LD_ARCH = "${SDK_LD_ARCH}"
48HOST_AS_ARCH = "${SDK_AS_ARCH}"
49#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
50
51TARGET_ARCH = "${SDK_ARCH}"
52TARGET_VENDOR = "${SDK_VENDOR}"
53TARGET_OS = "${SDK_OS}"
54TARGET_PREFIX = "${SDK_PREFIX}"
55TARGET_CC_ARCH = "${SDK_CC_ARCH}"
56TARGET_LD_ARCH = "${SDK_LD_ARCH}"
57TARGET_AS_ARCH = "${SDK_AS_ARCH}"
58TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
59TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
60TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
61TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
62TARGET_FPU = ""
63EXTRA_OECONF_GCC_FLOAT = ""
64TUNE_FEATURES = ""
65
66CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
67CFLAGS = "${BUILDSDK_CFLAGS}"
68CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
69LDFLAGS = "${BUILDSDK_LDFLAGS}"
70
71# Change to place files in SDKPATH
72base_prefix = "${SDKPATHNATIVE}"
73prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
74exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
75baselib = "lib"
76sbindir = "${bindir}"
77
78export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
79export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
80
81python nativesdk_virtclass_handler () {
82 pn = e.data.getVar("PN")
83 if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
84 return
85
86 # Set features here to prevent appends and distro features backfill
87 # from modifying nativesdk distro features
88 features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
89 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
90 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
91
92 e.data.setVar("MLPREFIX", "nativesdk-")
93 e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", ""))
94}
95
96python () {
97 pn = d.getVar("PN")
98 if not pn.startswith("nativesdk-"):
99 return
100
101 import oe.classextend
102
103 clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
104 clsextend.rename_packages()
105 clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
106
107 clsextend.map_depends_variable("DEPENDS")
108 clsextend.map_packagevars()
109 clsextend.map_variable("PROVIDES")
110 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
111 d.setVar("LIBCEXTENSION", "")
112 d.setVar("ABIEXTENSION", "")
113}
114
115addhandler nativesdk_virtclass_handler
116nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
117
118do_packagedata[stamp-extra-info] = ""
119
120USE_NLS = "${SDKUSE_NLS}"
121
122OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
123
124PATH:prepend = "${COREBASE}/scripts/nativesdk-intercept:"
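
The PN rewrite performed by nativesdk_virtclass_handler above normalises both
naming forms to a nativesdk- prefix; as a one-function sketch:

    def nativesdk_pn(pn):
        # foo-nativesdk and nativesdk-foo both normalise to nativesdk-foo.
        return "nativesdk-" + pn.replace("-nativesdk", "").replace("nativesdk-", "")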
diff --git a/meta/classes/nopackages.bbclass b/meta/classes/nopackages.bbclass
deleted file mode 100644
index 9ea7273530..0000000000
--- a/meta/classes/nopackages.bbclass
+++ /dev/null
@@ -1,19 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7deltask do_package
8deltask do_package_write_rpm
9deltask do_package_write_ipk
10deltask do_package_write_deb
11deltask do_package_write_tar
12deltask do_package_qa
13deltask do_packagedata
14deltask do_package_setscene
15deltask do_package_write_rpm_setscene
16deltask do_package_write_ipk_setscene
17deltask do_package_write_deb_setscene
18deltask do_package_qa_setscene
19deltask do_packagedata_setscene
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
deleted file mode 100644
index deea53c9ec..0000000000
--- a/meta/classes/npm.bbclass
+++ /dev/null
@@ -1,340 +0,0 @@
1# Copyright (C) 2020 Savoir-Faire Linux
2#
3# SPDX-License-Identifier: GPL-2.0-only
4#
5# This bbclass builds and installs an npm package to the target. The package
6# source files should be fetched in the calling recipe by using the SRC_URI
7# variable. The ${S} variable should be updated depending on your fetcher.
8#
9# Usage:
10# SRC_URI = "..."
11# inherit npm
12#
13# Optional variables:
14# NPM_ARCH:
15# Override the auto-generated npm architecture.
16#
17# NPM_INSTALL_DEV:
18# Set to 1 to also install devDependencies.
19
20inherit python3native
21
22DEPENDS:prepend = "nodejs-native nodejs-oe-cache-native "
23RDEPENDS:${PN}:append:class-target = " nodejs"
24
25EXTRA_OENPM = ""
26
27NPM_INSTALL_DEV ?= "0"
28
29NPM_NODEDIR ?= "${RECIPE_SYSROOT_NATIVE}${prefix_native}"
30
31def npm_target_arch_map(target_arch):
32 """Maps arch names to npm arch names"""
33 import re
34 if re.match("p(pc|owerpc)(|64)", target_arch):
35 return "ppc"
36 elif re.match("i.86$", target_arch):
37 return "ia32"
38 elif re.match("x86_64$", target_arch):
39 return "x64"
40 elif re.match("arm64$", target_arch):
41 return "arm"
42 return target_arch
43
44NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
45
46NPM_PACKAGE = "${WORKDIR}/npm-package"
47NPM_CACHE = "${WORKDIR}/npm-cache"
48NPM_BUILD = "${WORKDIR}/npm-build"
49NPM_REGISTRY = "${WORKDIR}/npm-registry"
50
51def npm_global_configs(d):
52 """Get the npm global configuration"""
53 configs = []
54 # Ensure no network access is done
55 configs.append(("offline", "true"))
56 configs.append(("proxy", "http://invalid"))
57 configs.append(("funds", False))
58 configs.append(("audit", False))
59 # Configure the cache directory
60 configs.append(("cache", d.getVar("NPM_CACHE")))
61 return configs
62
63## 'npm pack' runs 'prepare' and 'prepack' scripts. Support for
64## 'ignore-scripts' which prevents this behavior has been removed
65## from nodejs 16. Use a simple 'tar' instead.
66def npm_pack(env, srcdir, workdir):
67 """Emulate 'npm pack' on a specified directory"""
68 import subprocess
69 import os
70 import json
71
72 src = os.path.join(srcdir, 'package.json')
73 with open(src) as f:
74 j = json.load(f)
75
76 # base does not really matter and is for documentation purposes
77 # only. But the 'version' part must exist because other parts of
78 # the bbclass rely on it.
79 base = j['name'].split('/')[-1]
80 tarball = os.path.join(workdir, "%s-%s.tgz" % (base, j['version']))
81
82 # TODO: real 'npm pack' does not include directories while 'tar'
83 # does. But this does not seem to matter...
84 subprocess.run(['tar', 'czf', tarball,
85 '--exclude', './node-modules',
86 '--exclude-vcs',
87 '--transform', r's,^\./,package/,',
88 '--mtime', '1985-10-26T08:15:00.000Z',
89 '.'],
90 check = True, cwd = srcdir)
91
92 return (tarball, j)
93
94python npm_do_configure() {
95 """
96 Step one: configure the npm cache and the main npm package
97
98 All dependencies have been fetched and patched in the source directory.
99 They have to be packed (this removes unneeded files) and added to the npm
100 cache to be available for the next step.
101
102 The main package and its associated manifest file and shrinkwrap file have
103 to be configured to take into account these cached dependencies.
104 """
105 import base64
106 import copy
107 import json
108 import re
109 import shlex
110 import stat
111 import tempfile
112 from bb.fetch2.npm import NpmEnvironment
113 from bb.fetch2.npm import npm_unpack
114 from bb.fetch2.npmsw import foreach_dependencies
115 from bb.progress import OutOfProgressHandler
116 from oe.npm_registry import NpmRegistry
117
118 bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
119 bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
120
121 env = NpmEnvironment(d, configs=npm_global_configs(d))
122 registry = NpmRegistry(d.getVar('NPM_REGISTRY'), d.getVar('NPM_CACHE'))
123
124 def _npm_cache_add(tarball, pkg):
125 """Add tarball to local registry and register it in the
126 cache"""
127 registry.add_pkg(tarball, pkg)
128
129 def _npm_integrity(tarball):
130 """Return the npm integrity of a specified tarball"""
131 sha512 = bb.utils.sha512_file(tarball)
132 return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
133
134 def _npmsw_dependency_dict(orig, deptree):
135 """
136 Return the sub dictionary in the 'orig' dictionary corresponding to the
137 'deptree' dependency tree. This function follows the shrinkwrap file
138 format.
139 """
140 ptr = orig
141 for dep in deptree:
142 if "dependencies" not in ptr:
143 ptr["dependencies"] = {}
144 ptr = ptr["dependencies"]
145 if dep not in ptr:
146 ptr[dep] = {}
147 ptr = ptr[dep]
148 return ptr
149
150 # Manage the manifest file and shrinkwrap files
151 orig_manifest_file = d.expand("${S}/package.json")
152 orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json")
153 cached_manifest_file = d.expand("${NPM_PACKAGE}/package.json")
154 cached_shrinkwrap_file = d.expand("${NPM_PACKAGE}/npm-shrinkwrap.json")
155
156 with open(orig_manifest_file, "r") as f:
157 orig_manifest = json.load(f)
158
159 cached_manifest = copy.deepcopy(orig_manifest)
160 cached_manifest.pop("dependencies", None)
161 cached_manifest.pop("devDependencies", None)
162
163 has_shrinkwrap_file = True
164
165 try:
166 with open(orig_shrinkwrap_file, "r") as f:
167 orig_shrinkwrap = json.load(f)
168 except IOError:
169 has_shrinkwrap_file = False
170
171 if has_shrinkwrap_file:
172 cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
173 cached_shrinkwrap.pop("dependencies", None)
174
175 # Manage the dependencies
176 progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
177 progress_total = 1 # also count the main package
178 progress_done = 0
179
180 def _count_dependency(name, params, deptree):
181 nonlocal progress_total
182 progress_total += 1
183
184 def _cache_dependency(name, params, deptree):
185 destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
186 destsuffix = os.path.join(*destsubdirs)
187 with tempfile.TemporaryDirectory() as tmpdir:
188 # Add the dependency to the npm cache
189 destdir = os.path.join(d.getVar("S"), destsuffix)
190 (tarball, pkg) = npm_pack(env, destdir, tmpdir)
191 _npm_cache_add(tarball, pkg)
192 # Add its signature to the cached shrinkwrap
193 dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
194 dep["version"] = pkg['version']
195 dep["integrity"] = _npm_integrity(tarball)
196 if params.get("dev", False):
197 dep["dev"] = True
198 # Display progress
199 nonlocal progress_done
200 progress_done += 1
201 progress.write("%d/%d" % (progress_done, progress_total))
202
203 dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
204
205 if has_shrinkwrap_file:
206 foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
207 foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
208
209 # Configure the main package
210 with tempfile.TemporaryDirectory() as tmpdir:
211 (tarball, _) = npm_pack(env, d.getVar("S"), tmpdir)
212 npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
213
214 # Configure the cached manifest file and cached shrinkwrap file
215 def _update_manifest(depkey):
216 for name in orig_manifest.get(depkey, {}):
217 version = cached_shrinkwrap["dependencies"][name]["version"]
218 if depkey not in cached_manifest:
219 cached_manifest[depkey] = {}
220 cached_manifest[depkey][name] = version
221
222 if has_shrinkwrap_file:
223 _update_manifest("dependencies")
224
225 if dev:
226 if has_shrinkwrap_file:
227 _update_manifest("devDependencies")
228
229 os.chmod(cached_manifest_file, os.stat(cached_manifest_file).st_mode | stat.S_IWUSR)
230 with open(cached_manifest_file, "w") as f:
231 json.dump(cached_manifest, f, indent=2)
232
233 if has_shrinkwrap_file:
234 with open(cached_shrinkwrap_file, "w") as f:
235 json.dump(cached_shrinkwrap, f, indent=2)
236}
237
238python npm_do_compile() {
239 """
240 Step two: install the npm package
241
242 Use the configured main package and the cached dependencies to run the
243 installation process. The installation is done in a directory which is
244 not the destination directory yet.
245
246 A combination of 'npm pack' and 'npm install' is used to ensure that the
247 installed files are actual copies instead of symbolic links (which is the
248 default npm behavior).
249 """
250 import shlex
251 import tempfile
252 from bb.fetch2.npm import NpmEnvironment
253
254 bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
255
256 with tempfile.TemporaryDirectory() as tmpdir:
257 args = []
258 configs = npm_global_configs(d)
259
260 if bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False):
261 configs.append(("also", "development"))
262 else:
263 configs.append(("only", "production"))
264
265 # Report as many logs as possible for debugging purpose
266 configs.append(("loglevel", "silly"))
267
268 # Configure the installation to be done globally in the build directory
269 configs.append(("global", "true"))
270 configs.append(("prefix", d.getVar("NPM_BUILD")))
271
272 # Add node-gyp configuration
273 configs.append(("arch", d.getVar("NPM_ARCH")))
274 configs.append(("release", "true"))
275 configs.append(("nodedir", d.getVar("NPM_NODEDIR")))
276 configs.append(("python", d.getVar("PYTHON")))
277
278 env = NpmEnvironment(d, configs)
279
280 # Add node-pre-gyp configuration
281 args.append(("target_arch", d.getVar("NPM_ARCH")))
282 args.append(("build-from-source", "true"))
283
284 # Pack and install the main package
285 (tarball, _) = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
286 cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
287 env.run(cmd, args=args)
288}
289
290npm_do_install() {
291 # Step three: final install
292 #
293 # The previous installation has to be filtered to remove some extra files.
294
295 rm -rf ${D}
296
297 # Copy the entire lib and bin directories
298 install -d ${D}/${nonarch_libdir}
299 cp --no-preserve=ownership --recursive ${NPM_BUILD}/lib/. ${D}/${nonarch_libdir}
300
301 if [ -d "${NPM_BUILD}/bin" ]
302 then
303 install -d ${D}/${bindir}
304 cp --no-preserve=ownership --recursive ${NPM_BUILD}/bin/. ${D}/${bindir}
305 fi
306
307 # If the package (or its dependencies) uses node-gyp to build native addons,
308 # object files, static libraries or other temporary files can be hidden in
309 # the lib directory. To reduce the package size and to avoid QA issues
310 # (staticdev with static library files) these files must be removed.
311 local GYP_REGEX=".*/build/Release/[^/]*.node"
312
313 # Remove any node-gyp directory in ${D} to remove temporary build files
314 for GYP_D_FILE in $(find ${D} -regex "${GYP_REGEX}")
315 do
316 local GYP_D_DIR=${GYP_D_FILE%/Release/*}
317
318 rm --recursive --force ${GYP_D_DIR}
319 done
320
321 # Copy only the node-gyp release files
322 for GYP_B_FILE in $(find ${NPM_BUILD} -regex "${GYP_REGEX}")
323 do
324 local GYP_D_FILE=${D}/${prefix}/${GYP_B_FILE#${NPM_BUILD}}
325
326 install -d ${GYP_D_FILE%/*}
327 install -m 755 ${GYP_B_FILE} ${GYP_D_FILE}
328 done
329
330 # Remove the shrinkwrap file which does not need to be packed
331 rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json
332 rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json
333}
334
335FILES:${PN} += " \
336 ${bindir} \
337 ${nonarch_libdir} \
338"
339
340EXPORT_FUNCTIONS do_configure do_compile do_install
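
The _npm_integrity helper above emits a Subresource Integrity string; an
equivalent standalone computation using hashlib instead of bb.utils would be:

    import base64
    import hashlib

    def npm_integrity(tarball):
        # npm integrity strings are "<algorithm>-" + base64 of the raw digest.
        with open(tarball, "rb") as f:
            digest = hashlib.sha512(f.read()).digest()
        return "sha512-" + base64.b64encode(digest).decode()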
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
deleted file mode 100644
index 418400da8c..0000000000
--- a/meta/classes/package.bbclass
+++ /dev/null
@@ -1,2558 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Packaging process
9#
10# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
11# taking D and splitting it up into the packages listed in PACKAGES, placing the
12# resulting output in PKGDEST.
13#
14# There are the following default steps but PACKAGEFUNCS can be extended:
15#
16# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
17#
18# b) perform_packagecopy - Copy D into PKGD
19#
20# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
21#
22# d) split_and_strip_files - split the files into runtime and debug and strip them.
23# Debug files include debug info split, and associated sources that end up in -dbg packages
24#
25# e) fixup_perms - Fix up permissions in the package before we split it.
26#
27# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
28# Also triggers the binary stripping code to put files in -dbg packages.
29#
30# g) package_do_filedeps - Collect perfile run-time dependency metadata
31# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
32# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
33#
34# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
35# dependencies found. Also stores the package name so anyone else using this library
36# knows which package to depend on.
37#
38# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
39#
40# j) read_shlibdeps - Reads the stored shlibs information into the metadata
41#
42# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
43#
44# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
45# packaging steps
46
47inherit packagedata
48inherit chrpath
49inherit package_pkgdata
50inherit insane
51
52PKGD = "${WORKDIR}/package"
53PKGDEST = "${WORKDIR}/packages-split"
54
55LOCALE_SECTION ?= ''
56
57ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
58
59# rpm is used for the per-file dependency identification
60# dwarfsrcfiles is used to determine the list of debug source files
61PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
62
63
64# If your postinstall can execute at rootfs creation time rather than on
65# target but depends on a native/cross tool in order to execute, you need to
66# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
67# in the package dependencies as normal; this is just for native/cross support
68# tools at rootfs build time.
69PACKAGE_WRITE_DEPS ??= ""
70
71def legitimize_package_name(s):
72 """
73 Make sure package names are legitimate strings
74 """
75 import re
76
77 def fixutf(m):
78 cp = m.group(1)
79 if cp:
80 return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
81
82 # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
83 s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
84
85 # Remaining package name validity fixes
86 return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
87
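# For illustration: legitimize_package_name maps e.g. "Foo_Bar@1,2/3" to
# "foo-bar+1+2-3", after first decoding glibc-style <U0041> escapes.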
88def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
89 """
90 Used in .bb files to split up dynamically generated subpackages of a
91 given package, usually plugins or modules.
92
93 Arguments:
94 root -- the path in which to search
95 file_regex -- regular expression to match searched files. Use
96 parentheses () to mark the part of this expression
97 that should be used to derive the module name (to be
98 substituted where %s is used in other function
99 arguments as noted below)
100 output_pattern -- pattern to use for the package names. Must include %s.
101 description -- description to set for each package. Must include %s.
102 postinst -- postinstall script to use for all packages (as a
103 string)
104 recursive -- True to perform a recursive search - default False
105 hook -- a hook function to be called for every match. The
106 function will be called with the following arguments
107 (in the order listed):
108 f: full path to the file/directory match
109 pkg: the package name
110 file_regex: as above
111 output_pattern: as above
112 modulename: the module name derived using file_regex
113 extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
114 all packages. The default value of None causes a
115 dependency on the main package (${PN}) - if you do
116 not want this, pass '' for this parameter.
117 aux_files_pattern -- extra item(s) to be added to FILES for each
118 package. Can be a single string item or a list of
119 strings for multiple items. Must include %s.
120 postrm -- postrm script to use for all packages (as a string)
121 allow_dirs -- True allow directories to be matched - default False
122 prepend -- if True, prepend created packages to PACKAGES instead
123 of the default False which appends them
124 match_path -- match file_regex on the whole relative path to the
125 root rather than just the file name
126 aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
127 each package, using the actual derived module name
128 rather than converting it to something legal for a
129 package name. Can be a single string item or a list
130 of strings for multiple items. Must include %s.
131 allow_links -- True to allow symlinks to be matched - default False
132 summary -- Summary to set for each package. Must include %s;
133 defaults to description if not set.
134
135 """
136
137 dvar = d.getVar('PKGD')
138 root = d.expand(root)
139 output_pattern = d.expand(output_pattern)
140 extra_depends = d.expand(extra_depends)
141
142 # If the root directory doesn't exist, don't error out later but silently do
143 # no splitting.
144 if not os.path.exists(dvar + root):
145 return []
146
147 ml = d.getVar("MLPREFIX")
148 if ml:
149 if not output_pattern.startswith(ml):
150 output_pattern = ml + output_pattern
151
152 newdeps = []
153 for dep in (extra_depends or "").split():
154 if dep.startswith(ml):
155 newdeps.append(dep)
156 else:
157 newdeps.append(ml + dep)
158 if newdeps:
159 extra_depends = " ".join(newdeps)
160
161
162 packages = d.getVar('PACKAGES').split()
163 split_packages = set()
164
165 if postinst:
166 postinst = '#!/bin/sh\n' + postinst + '\n'
167 if postrm:
168 postrm = '#!/bin/sh\n' + postrm + '\n'
169 if not recursive:
170 objs = os.listdir(dvar + root)
171 else:
172 objs = []
173 for walkroot, dirs, files in os.walk(dvar + root):
174 for file in files:
175 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
176 if relpath:
177 objs.append(relpath)
178
179 if extra_depends is None:
180 extra_depends = d.getVar("PN")
181
182 if not summary:
183 summary = description
184
185 for o in sorted(objs):
186 import re, stat
187 if match_path:
188 m = re.match(file_regex, o)
189 else:
190 m = re.match(file_regex, os.path.basename(o))
191
192 if not m:
193 continue
194 f = os.path.join(dvar + root, o)
195 mode = os.lstat(f).st_mode
196 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
197 continue
198 on = legitimize_package_name(m.group(1))
199 pkg = output_pattern % on
200 split_packages.add(pkg)
201 if pkg not in packages:
202 if prepend:
203 packages = [pkg] + packages
204 else:
205 packages.append(pkg)
206 oldfiles = d.getVar('FILES:' + pkg)
207 newfile = os.path.join(root, o)
208 # These names will be passed through glob() so if the filename actually
209 # contains * or ? (rare, but possible) we need to handle that specially
210 newfile = newfile.replace('*', '[*]')
211 newfile = newfile.replace('?', '[?]')
212 if not oldfiles:
213 the_files = [newfile]
214 if aux_files_pattern:
215 if type(aux_files_pattern) is list:
216 for fp in aux_files_pattern:
217 the_files.append(fp % on)
218 else:
219 the_files.append(aux_files_pattern % on)
220 if aux_files_pattern_verbatim:
221 if type(aux_files_pattern_verbatim) is list:
222 for fp in aux_files_pattern_verbatim:
223 the_files.append(fp % m.group(1))
224 else:
225 the_files.append(aux_files_pattern_verbatim % m.group(1))
226 d.setVar('FILES:' + pkg, " ".join(the_files))
227 else:
228 d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
229 if extra_depends != '':
230 d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
231 if not d.getVar('DESCRIPTION:' + pkg):
232 d.setVar('DESCRIPTION:' + pkg, description % on)
233 if not d.getVar('SUMMARY:' + pkg):
234 d.setVar('SUMMARY:' + pkg, summary % on)
235 if postinst:
236 d.setVar('pkg_postinst:' + pkg, postinst)
237 if postrm:
238 d.setVar('pkg_postrm:' + pkg, postrm)
239 if callable(hook):
240 hook(f, pkg, file_regex, output_pattern, m.group(1))
241
242 d.setVar('PACKAGES', ' '.join(packages))
243 return list(split_packages)
244
245PACKAGE_DEPENDS += "file-native"
246
247python () {
248 if d.getVar('PACKAGES') != '':
249 deps = ""
250 for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
251 deps += " %s:do_populate_sysroot" % dep
252 if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
253 deps += ' xz-native:do_populate_sysroot'
254 d.appendVarFlag('do_package', 'depends', deps)
255
256 # shlibs requires any DEPENDS to have already been packaged for the *.list files
257 d.appendVarFlag('do_package', 'deptask', " do_packagedata")
258}
259
260# Get a list of files from file vars by searching files under current working directory
261# The list contains symlinks, directories and normal files.
262def files_from_filevars(filevars):
263 import os,glob
264 cpath = oe.cachedpath.CachedPath()
265 files = []
266 for f in filevars:
267 if os.path.isabs(f):
268 f = '.' + f
269 if not f.startswith("./"):
270 f = './' + f
271 globbed = glob.glob(f)
272 if globbed:
273 if [ f ] != globbed:
274 files += globbed
275 continue
276 files.append(f)
277
278 symlink_paths = []
279 for ind, f in enumerate(files):
280 # Handle directory symlinks. Truncate path to the lowest level symlink
281 parent = ''
282 for dirname in f.split('/')[:-1]:
283 parent = os.path.join(parent, dirname)
284 if dirname == '.':
285 continue
286 if cpath.islink(parent):
287 bb.warn("FILES contains file '%s' which resides under a "
288 "directory symlink. Please fix the recipe and use the "
289 "real path for the file." % f[1:])
290 symlink_paths.append(f)
291 files[ind] = parent
292 f = parent
293 break
294
295 if not cpath.islink(f):
296 if cpath.isdir(f):
297 newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
298 if newfiles:
299 files += newfiles
300
301 return files, symlink_paths
302
303# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
304def get_conffiles(pkg, d):
305 pkgdest = d.getVar('PKGDEST')
306 root = os.path.join(pkgdest, pkg)
307 cwd = os.getcwd()
308 os.chdir(root)
309
310 conffiles = d.getVar('CONFFILES:%s' % pkg)
311 if conffiles is None:
312 conffiles = d.getVar('CONFFILES')
313 if conffiles is None:
314 conffiles = ""
315 conffiles = conffiles.split()
316 conf_orig_list = files_from_filevars(conffiles)[0]
317
318 # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
319 conf_list = []
320 for f in conf_orig_list:
321 if os.path.isdir(f):
322 continue
323 if os.path.islink(f):
324 continue
325 if not os.path.exists(f):
326 continue
327 conf_list.append(f)
328
329 # Remove the leading './'
330 for i in range(0, len(conf_list)):
331 conf_list[i] = conf_list[i][1:]
332
333 os.chdir(cwd)
334 return conf_list
335
336def checkbuildpath(file, d):
337 tmpdir = d.getVar('TMPDIR')
338 with open(file) as f:
339 file_content = f.read()
340 if tmpdir in file_content:
341 return True
342
343 return False
344
345def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
346 debugfiles = {}
347
348 for line in dwarfsrcfiles_output.splitlines():
349 if line.startswith("\t"):
350 debugfiles[os.path.normpath(line.split()[0])] = ""
351
352 return debugfiles.keys()
353
354def source_info(file, d, fatal=True):
355 import subprocess
356
357 cmd = ["dwarfsrcfiles", file]
358 try:
359 output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
360 retval = 0
361 except subprocess.CalledProcessError as exc:
362 output = exc.output
363 retval = exc.returncode
364
365 # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
366 if retval != 0 and retval != 255:
367 msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
368 if fatal:
369 bb.fatal(msg)
370 bb.note(msg)
371
372 debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
373
374 return list(debugsources)
375
376def splitdebuginfo(file, dvar, dv, d):
377 # Function to split a single file into two components: one is the stripped
378 # target system binary, the other contains any debugging information. The
379 # two files are linked to reference each other.
380 #
381 # return a mapping of files:debugsources
382
383 import stat
384 import subprocess
385
386 src = file[len(dvar):]
387 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
388 debugfile = dvar + dest
389 sources = []
390
391 if file.endswith(".ko") and file.find("/lib/modules/") != -1:
392 if oe.package.is_kernel_module_signed(file):
393 bb.debug(1, "Skip strip on signed module %s" % file)
394 return (file, sources)
395
396 # Split the file...
397 bb.utils.mkdirhier(os.path.dirname(debugfile))
398 #bb.note("Split %s -> %s" % (file, debugfile))
399 # Only store off the hard link reference if we successfully split!
400
401 dvar = d.getVar('PKGD')
402 objcopy = d.getVar("OBJCOPY")
403
404 newmode = None
405 if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
406 origmode = os.stat(file)[stat.ST_MODE]
407 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
408 os.chmod(file, newmode)
409
410 # We need to extract the debug src information here...
411 if dv["srcdir"]:
412 sources = source_info(file, d)
413
414 bb.utils.mkdirhier(os.path.dirname(debugfile))
415
416 subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
417
418 # Set the debuglink to have the view of the file path on the target
419 subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
420
421 if newmode:
422 os.chmod(file, origmode)
423
424 return (file, sources)
425
426def splitstaticdebuginfo(file, dvar, dv, d):
427 # Unlike the function above, there is no way to split a static library into
428 # two components. So to get similar results we will copy the unmodified
429 # static library (containing the debug symbols) into a new directory.
430 # We will then strip (preserving symbols) the static library in the
431 # typical location.
432 #
433 # return a mapping of files:debugsources
434
435 import stat
436 import shutil
437 src = file[len(dvar):]
438 dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
439 debugfile = dvar + dest
440 sources = []
441
442 # Copy the file...
443 bb.utils.mkdirhier(os.path.dirname(debugfile))
444 #bb.note("Copy %s -> %s" % (file, debugfile))
445
446 dvar = d.getVar('PKGD')
447
448 newmode = None
449 if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
450 origmode = os.stat(file)[stat.ST_MODE]
451 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
452 os.chmod(file, newmode)
453
454 # We need to extract the debug src information here...
455 if dv["srcdir"]:
456 sources = source_info(file, d)
457
458 bb.utils.mkdirhier(os.path.dirname(debugfile))
459
460 # Copy the unmodified item to the debug directory
461 shutil.copy2(file, debugfile)
462
463 if newmode:
464 os.chmod(file, origmode)
465
466 return (file, sources)
467
468def inject_minidebuginfo(file, dvar, dv, d):
469 # Extract just the symbols from debuginfo into minidebuginfo,
470 # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
471 # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
472
473 import subprocess
474
475 readelf = d.getVar('READELF')
476 nm = d.getVar('NM')
477 objcopy = d.getVar('OBJCOPY')
478
479 minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
480
481 src = file[len(dvar):]
482 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
483 debugfile = dvar + dest
484 minidebugfile = minidebuginfodir + src + '.minidebug'
485 bb.utils.mkdirhier(os.path.dirname(minidebugfile))
486
487 # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
488 # so skip it.
489 if not os.path.exists(debugfile):
490 bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
491 return
492
493 # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
494 # We will exclude all of these from minidebuginfo to save space.
495 remove_section_names = []
496 for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
497 fields = line.split()
498 if len(fields) < 8:
499 continue
500 name = fields[0]
501 type = fields[1]
502 flags = fields[7]
503 # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
504 if name.startswith('.debug_'):
505 continue
506 if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
507 remove_section_names.append(name)
508
509 # List dynamic symbols in the binary. We can exclude these from minidebuginfo
510 # because they are always present in the binary.
511 dynsyms = set()
512 for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
513 dynsyms.add(line.split()[0])
514
515 # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
516 # These are the ones we want to keep in minidebuginfo.
517 keep_symbols_file = minidebugfile + '.symlist'
518 found_any_symbols = False
519 with open(keep_symbols_file, 'w') as f:
520 for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
521 fields = line.split('|')
522 if len(fields) < 7:
523 continue
524 name = fields[0].strip()
525            sym_type = fields[3].strip()
526            if sym_type == 'FUNC' and name not in dynsyms:
527 f.write('{}\n'.format(name))
528 found_any_symbols = True
529
530 if not found_any_symbols:
531 bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
532 return
533
534 bb.utils.remove(minidebugfile)
535 bb.utils.remove(minidebugfile + '.xz')
536
537 subprocess.check_call([objcopy, '-S'] +
538 ['--remove-section={}'.format(s) for s in remove_section_names] +
539 ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
540
541 subprocess.check_call(['xz', '--keep', minidebugfile])
542
543 subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
544
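# Editor's sketch: a quick way to check that the injection above worked is to
# look for the '.gnu_debugdata' section in the stripped binary. Both arguments
# here are hypothetical.
def _example_has_minidebuginfo(readelf, binary):
    import subprocess
    out = subprocess.check_output([readelf, '-W', '-S', binary],
                                  universal_newlines=True)
    return '.gnu_debugdata' in out
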
545def copydebugsources(debugsrcdir, sources, d):
546 # The debug src information written out to sourcefile is further processed
547 # and copied to the destination here.
548
549 import stat
550 import subprocess
551
552 if debugsrcdir and sources:
553 sourcefile = d.expand("${WORKDIR}/debugsources.list")
554 bb.utils.remove(sourcefile)
555
556 # filenames are null-separated - this is an artefact of the previous use
557 # of rpm's debugedit, which was writing them out that way, and the code elsewhere
558 # is still assuming that.
559 debuglistoutput = '\0'.join(sources) + '\0'
560 with open(sourcefile, 'a') as sf:
561 sf.write(debuglistoutput)
562
563 dvar = d.getVar('PKGD')
564 strip = d.getVar("STRIP")
565 objcopy = d.getVar("OBJCOPY")
566 workdir = d.getVar("WORKDIR")
567 sdir = d.getVar("S")
568 sparentdir = os.path.dirname(os.path.dirname(sdir))
569 sbasedir = os.path.basename(os.path.dirname(sdir)) + "/" + os.path.basename(sdir)
570 workparentdir = os.path.dirname(os.path.dirname(workdir))
571 workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
572
573        # If S isn't based on WORKDIR we can infer our sources are located elsewhere,
574 # e.g. using externalsrc; use S as base for our dirs
575 if workdir in sdir or 'work-shared' in sdir:
576 basedir = workbasedir
577 parentdir = workparentdir
578 else:
579 basedir = sbasedir
580 parentdir = sparentdir
581
582        # If the build path appears in sourcefile, the toolchain did not use
583        # -fdebug-prefix-map when compiling
584 if checkbuildpath(sourcefile, d):
585 localsrc_prefix = parentdir + "/"
586 else:
587 localsrc_prefix = "/usr/src/debug/"
588
589 nosuchdir = []
590 basepath = dvar
591 for p in debugsrcdir.split("/"):
592 basepath = basepath + "/" + p
593 if not cpath.exists(basepath):
594 nosuchdir.append(basepath)
595 bb.utils.mkdirhier(basepath)
596 cpath.updatecache(basepath)
597
598 # Ignore files from the recipe sysroots (target and native)
599 processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | "
600        # We need to ignore files that are not actually ours;
601        # we do this by only paying attention to items from this package
602 processdebugsrc += "fgrep -zw '%s' | "
603 # Remove prefix in the source paths
604 processdebugsrc += "sed 's#%s##g' | "
605 processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
606
607 cmd = processdebugsrc % (sourcefile, basedir, localsrc_prefix, parentdir, dvar, debugsrcdir)
608 try:
609 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
610 except subprocess.CalledProcessError:
611 # Can "fail" if internal headers/transient sources are attempted
612 pass
613
614        # cpio seems to have a bug when -l and -L are used together: symbolic links are just copied, not dereferenced.
615 # Work around this by manually finding and copying any symbolic links that made it through.
616 cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
617 (dvar, debugsrcdir, dvar, debugsrcdir, parentdir, dvar, debugsrcdir)
618 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
619
620
621        # debugsources.list may be polluted from the host if we used externalsrc;
622        # cpio uses copy-pass and may have just created a directory structure
623        # matching the one from the host. If that's the case, move those files to
624        # debugsrcdir to avoid host contamination.
625        # The empty directory structure is deleted in the next step.
626
627 # Same check as above for externalsrc
628 if workdir not in sdir:
629 if os.path.exists(dvar + debugsrcdir + sdir):
630                cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar, debugsrcdir)
631 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
632
633 # The copy by cpio may have resulted in some empty directories! Remove these
634 cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
635 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
636
637        # Also remove debugsrcdir if it's empty
638 for p in nosuchdir[::-1]:
639 if os.path.exists(p) and not os.listdir(p):
640 os.rmdir(p)
641
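# Editor's sketch of the null-separated framing used by debugsources.list
# above (a debugedit legacy that 'sort -z' and 'cpio -0' both depend on):
def _example_debugsources_framing():
    sources = ["src/main.c", "src/util.c"]        # hypothetical source paths
    blob = '\0'.join(sources) + '\0'
    assert blob.split('\0')[:-1] == sources
    return blob
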
642#
643# Package data handling routines
644#
645
646def get_package_mapping(pkg, basepkg, d, depversions=None):
647 import oe.packagedata
648
649 data = oe.packagedata.read_subpkgdata(pkg, d)
650 key = "PKG:%s" % pkg
651
652 if key in data:
653 if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
654 bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
655 # Have to avoid undoing the write_extra_pkgs(global_variants...)
656 if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
657 and data[key] == basepkg:
658 return pkg
659 if depversions == []:
660 # Avoid returning a mapping if the renamed package rprovides its original name
661 rprovkey = "RPROVIDES:%s" % pkg
662 if rprovkey in data:
663 if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
664 bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
665 return pkg
666 # Do map to rewritten package name
667 return data[key]
668
669 return pkg
670
671def get_package_additional_metadata(pkg_type, d):
672 base_key = "PACKAGE_ADD_METADATA"
673 for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
674 if d.getVar(key, False) is None:
675 continue
676 d.setVarFlag(key, "type", "list")
677 if d.getVarFlag(key, "separator") is None:
678 d.setVarFlag(key, "separator", "\\n")
679 metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
680 return "\n".join(metadata_fields).strip()
681
682def runtime_mapping_rename(varname, pkg, d):
683 #bb.note("%s before: %s" % (varname, d.getVar(varname)))
684
685 new_depends = {}
686 deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
687 for depend, depversions in deps.items():
688 new_depend = get_package_mapping(depend, pkg, d, depversions)
689 if depend != new_depend:
690 bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
691 new_depends[new_depend] = deps[depend]
692
693 d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
694
695 #bb.note("%s after: %s" % (varname, d.getVar(varname)))
696
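# Editor's sketch: the explode/join pair used by runtime_mapping_rename()
# keeps version constraints intact while package names are rewritten. The
# package names below are hypothetical.
def _example_dep_rename():
    deps = bb.utils.explode_dep_versions2("libfoo (>= 1.2) libbar")
    renamed = {("libfoo1" if k == "libfoo" else k): v for k, v in deps.items()}
    return bb.utils.join_deps(renamed, commasep=False)
    # expected: "libfoo1 (>= 1.2) libbar"
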
697#
698# Used by do_packagedata (and possibly other routines post do_package)
699#
700
701PRSERV_ACTIVE = "${@bool(d.getVar("PRSERV_HOST"))}"
702PRSERV_ACTIVE[vardepvalue] = "${PRSERV_ACTIVE}"
703package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
704package_get_auto_pr[vardeps] += "PRSERV_ACTIVE"
705python package_get_auto_pr() {
706 import oe.prservice
707
708 def get_do_package_hash(pn):
709 if d.getVar("BB_RUNTASK") != "do_package":
710 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
711 for dep in taskdepdata:
712 if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
713 return taskdepdata[dep][6]
714 return None
715
716 # Support per recipe PRSERV_HOST
717 pn = d.getVar('PN')
718 host = d.getVar("PRSERV_HOST_" + pn)
719    if host is not None:
720 d.setVar("PRSERV_HOST", host)
721
722 pkgv = d.getVar("PKGV")
723
724 # PR Server not active, handle AUTOINC
725 if not d.getVar('PRSERV_HOST'):
726 d.setVar("PRSERV_PV_AUTOINC", "0")
727 return
728
729 auto_pr = None
730 pv = d.getVar("PV")
731 version = d.getVar("PRAUTOINX")
732 pkgarch = d.getVar("PACKAGE_ARCH")
733 checksum = get_do_package_hash(pn)
734
735 # If do_package isn't in the dependencies, we can't get the checksum...
736 if not checksum:
737 bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
738 #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
739 #for dep in taskdepdata:
740 # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
741 return
742
743 if d.getVar('PRSERV_LOCKDOWN'):
744 auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
745 if auto_pr is None:
746            bb.fatal("Cannot get PRAUTO from the lockdown exported file")
747        d.setVar('PRAUTO', str(auto_pr))
748 return
749
750 try:
751 conn = oe.prservice.prserv_make_conn(d)
752 if conn is not None:
753 if "AUTOINC" in pkgv:
754 srcpv = bb.fetch2.get_srcrev(d)
755 base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
756 value = conn.getPR(base_ver, pkgarch, srcpv)
757 d.setVar("PRSERV_PV_AUTOINC", str(value))
758
759 auto_pr = conn.getPR(version, pkgarch, checksum)
760 conn.close()
761 except Exception as e:
762        bb.fatal("Cannot get PRAUTO, exception %s" % str(e))
763    if auto_pr is None:
764        bb.fatal("Cannot get PRAUTO from the remote PR service")
765    d.setVar('PRAUTO', str(auto_pr))
766}
767
768#
769# Package functions suitable for inclusion in PACKAGEFUNCS
770#
771
772python package_convert_pr_autoinc() {
773 pkgv = d.getVar("PKGV")
774
775 # Adjust pkgv as necessary...
776 if 'AUTOINC' in pkgv:
777 d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
778
779 # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
780 d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
781 d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
782}
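# Editor's sketch: what package_convert_pr_autoinc() does to an AUTOINC
# version string (the PKGV value is hypothetical):
def _example_autoinc_rewrite():
    pkgv = "1.0+gitAUTOINC+deadbeef"
    return pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}")
    # -> "1.0+git${PRSERV_PV_AUTOINC}+deadbeef"; the placeholder later resolves
    # to the PR-service counter, or to "0" when no PR server is configured
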
783
784LOCALEBASEPN ??= "${PN}"
785
786python package_do_split_locales() {
787    if d.getVar('PACKAGE_NO_LOCALE') == '1':
788        bb.debug(1, "locale splitting disabled for this package (PACKAGE_NO_LOCALE)")
789 return
790
791 packages = (d.getVar('PACKAGES') or "").split()
792
793 datadir = d.getVar('datadir')
794 if not datadir:
795 bb.note("datadir not defined")
796 return
797
798 dvar = d.getVar('PKGD')
799 pn = d.getVar('LOCALEBASEPN')
800
801 if pn + '-locale' in packages:
802 packages.remove(pn + '-locale')
803
804 localedir = os.path.join(dvar + datadir, 'locale')
805
806 if not cpath.isdir(localedir):
807 bb.debug(1, "No locale files in this package")
808 return
809
810 locales = os.listdir(localedir)
811
812 summary = d.getVar('SUMMARY') or pn
813 description = d.getVar('DESCRIPTION') or ""
814 locale_section = d.getVar('LOCALE_SECTION')
815 mlprefix = d.getVar('MLPREFIX') or ""
816 for l in sorted(locales):
817 ln = legitimize_package_name(l)
818 pkg = pn + '-locale-' + ln
819 packages.append(pkg)
820 d.setVar('FILES:' + pkg, os.path.join(datadir, 'locale', l))
821 d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
822 d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
823 d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
824 d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
825 if locale_section:
826 d.setVar('SECTION:' + pkg, locale_section)
827
828 d.setVar('PACKAGES', ' '.join(packages))
829
830 # Disabled by RP 18/06/07
831 # Wildcards aren't supported in debian
832 # They break with ipkg since glibc-locale* will mean that
833 # glibc-localedata-translit* won't install as a dependency
834 # for some other package which breaks meta-toolchain
835 # Probably breaks since virtual-locale- isn't provided anywhere
836 #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
837 #rdep.append('%s-locale*' % pn)
838 #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
839}
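# Editor's sketch: the per-locale variables produced by the loop above for a
# hypothetical locale 'en_GB' with LOCALEBASEPN 'foo' and an empty MLPREFIX:
def _example_locale_package_vars():
    ln = legitimize_package_name("en_GB")         # -> "en-gb"
    pkg = "foo-locale-" + ln                      # -> "foo-locale-en-gb"
    rrecommends = "virtual-locale-%s" % ln        # -> "virtual-locale-en-gb"
    rprovides = "foo-locale %s-translation" % ln  # -> "foo-locale en-gb-translation"
    return pkg, rrecommends, rprovides
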
840
841python perform_packagecopy () {
842 import subprocess
843 import shutil
844
845 dest = d.getVar('D')
846 dvar = d.getVar('PKGD')
847
848    # Start package population by taking a copy of the installed
849    # files to operate on.
850 # Preserve sparse files and hard links
851 cmd = 'tar --exclude=./sysroot-only -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
852 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
853
854 # replace RPATHs for the nativesdk binaries, to make them relocatable
855 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
856 rpath_replace (dvar, d)
857}
858perform_packagecopy[cleandirs] = "${PKGD}"
859perform_packagecopy[dirs] = "${PKGD}"
860
861# We generate a master list of directories to process; we start by
862# seeding this list with reasonable defaults, then load entries from
863# the fs-perms.txt files.
864python fixup_perms () {
865 import pwd, grp
866
867 # init using a string with the same format as a line as documented in
868 # the fs-perms.txt file
869 # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
870 # <path> link <link target>
871 #
872 # __str__ can be used to print out an entry in the input format
873 #
874 # if fs_perms_entry.path is None:
875 # an error occurred
876 # if fs_perms_entry.link, you can retrieve:
877 # fs_perms_entry.path = path
878 # fs_perms_entry.link = target of link
879 # if not fs_perms_entry.link, you can retrieve:
880 # fs_perms_entry.path = path
881 # fs_perms_entry.mode = expected dir mode or None
882 # fs_perms_entry.uid = expected uid or -1
883 # fs_perms_entry.gid = expected gid or -1
884 # fs_perms_entry.walk = 'true' or something else
885 # fs_perms_entry.fmode = expected file mode or None
886 # fs_perms_entry.fuid = expected file uid or -1
887    # fs_perms_entry.fgid = expected file gid or -1
888 class fs_perms_entry():
889 def __init__(self, line):
890 lsplit = line.split()
891 if len(lsplit) == 3 and lsplit[1].lower() == "link":
892 self._setlink(lsplit[0], lsplit[2])
893 elif len(lsplit) == 8:
894 self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
895 else:
896 msg = "Fixup Perms: invalid config line %s" % line
897 oe.qa.handle_error("perm-config", msg, d)
898 self.path = None
899 self.link = None
900
901 def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
902 self.path = os.path.normpath(path)
903 self.link = None
904 self.mode = self._procmode(mode)
905 self.uid = self._procuid(uid)
906 self.gid = self._procgid(gid)
907 self.walk = walk.lower()
908 self.fmode = self._procmode(fmode)
909 self.fuid = self._procuid(fuid)
910 self.fgid = self._procgid(fgid)
911
912 def _setlink(self, path, link):
913 self.path = os.path.normpath(path)
914 self.link = link
915
916 def _procmode(self, mode):
917            if not mode or mode == "-":
918 return None
919 else:
920 return int(mode,8)
921
922 # Note uid/gid -1 has special significance in os.lchown
923 def _procuid(self, uid):
924 if uid is None or uid == "-":
925 return -1
926 elif uid.isdigit():
927 return int(uid)
928 else:
929 return pwd.getpwnam(uid).pw_uid
930
931 def _procgid(self, gid):
932 if gid is None or gid == "-":
933 return -1
934 elif gid.isdigit():
935 return int(gid)
936 else:
937 return grp.getgrnam(gid).gr_gid
938
939        # Used for debugging the entries
940 def __str__(self):
941 if self.link:
942 return "%s link %s" % (self.path, self.link)
943 else:
944 mode = "-"
945 if self.mode:
946 mode = "0%o" % self.mode
947 fmode = "-"
948 if self.fmode:
949 fmode = "0%o" % self.fmode
950 uid = self._mapugid(self.uid)
951 gid = self._mapugid(self.gid)
952 fuid = self._mapugid(self.fuid)
953 fgid = self._mapugid(self.fgid)
954 return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
955
956 def _mapugid(self, id):
957 if id is None or id == -1:
958 return "-"
959 else:
960 return "%d" % id
961
962 # Fix the permission, owner and group of path
963 def fix_perms(path, mode, uid, gid, dir):
964 if mode and not os.path.islink(path):
965 #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
966 os.chmod(path, mode)
967 # -1 is a special value that means don't change the uid/gid
968 # if they are BOTH -1, don't bother to lchown
969 if not (uid == -1 and gid == -1):
970 #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
971 os.lchown(path, uid, gid)
972
973 # Return a list of configuration files based on either the default
974 # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
975 # paths are resolved via BBPATH
976    def get_fs_perms_list(d):
977        perms_list = ""
978        bbpath = d.getVar('BBPATH')
979        fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
980        for conf_file in fs_perms_tables.split():
981            confpath = bb.utils.which(bbpath, conf_file)
982            if confpath:
983                perms_list += " %s" % confpath
984            else:
985                bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
986        return perms_list
987
988
989
990 dvar = d.getVar('PKGD')
991
992 fs_perms_table = {}
993 fs_link_table = {}
994
995 # By default all of the standard directories specified in
996 # bitbake.conf will get 0755 root:root.
997 target_path_vars = [ 'base_prefix',
998 'prefix',
999 'exec_prefix',
1000 'base_bindir',
1001 'base_sbindir',
1002 'base_libdir',
1003 'datadir',
1004 'sysconfdir',
1005 'servicedir',
1006 'sharedstatedir',
1007 'localstatedir',
1008 'infodir',
1009 'mandir',
1010 'docdir',
1011 'bindir',
1012 'sbindir',
1013 'libexecdir',
1014 'libdir',
1015 'includedir',
1016 'oldincludedir' ]
1017
1018 for path in target_path_vars:
1019 dir = d.getVar(path) or ""
1020 if dir == "":
1021 continue
1022 fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
1023
1024 # Now we actually load from the configuration files
1025 for conf in get_fs_perms_list(d).split():
1026 if not os.path.exists(conf):
1027 continue
1028 with open(conf) as f:
1029 for line in f:
1030 if line.startswith('#'):
1031 continue
1032 lsplit = line.split()
1033 if len(lsplit) == 0:
1034 continue
1035 if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
1036 msg = "Fixup perms: %s invalid line: %s" % (conf, line)
1037 oe.qa.handle_error("perm-line", msg, d)
1038 continue
1039 entry = fs_perms_entry(d.expand(line))
1040 if entry and entry.path:
1041 if entry.link:
1042 fs_link_table[entry.path] = entry
1043 if entry.path in fs_perms_table:
1044 fs_perms_table.pop(entry.path)
1045 else:
1046 fs_perms_table[entry.path] = entry
1047 if entry.path in fs_link_table:
1048 fs_link_table.pop(entry.path)
1049
1050 # Debug -- list out in-memory table
1051 #for dir in fs_perms_table:
1052 # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
1053 #for link in fs_link_table:
1054 # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
1055
1056 # We process links first, so we can go back and fixup directory ownership
1057 # for any newly created directories
1058 # Process in sorted order so /run gets created before /run/lock, etc.
1059 for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
1060 link = entry.link
1061 dir = entry.path
1062 origin = dvar + dir
1063 if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
1064 continue
1065
1066 if link[0] == "/":
1067 target = dvar + link
1068 ptarget = link
1069 else:
1070 target = os.path.join(os.path.dirname(origin), link)
1071 ptarget = os.path.join(os.path.dirname(dir), link)
1072 if os.path.exists(target):
1073 msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
1074 oe.qa.handle_error("perm-link", msg, d)
1075 continue
1076
1077        # Create the path to move the directory to, move it, and then set up the symlink
1078 bb.utils.mkdirhier(os.path.dirname(target))
1079 #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
1080 bb.utils.rename(origin, target)
1081 #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
1082 os.symlink(link, origin)
1083
1084 for dir in fs_perms_table:
1085 origin = dvar + dir
1086 if not (cpath.exists(origin) and cpath.isdir(origin)):
1087 continue
1088
1089 fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
1090
1091 if fs_perms_table[dir].walk == 'true':
1092 for root, dirs, files in os.walk(origin):
1093 for dr in dirs:
1094 each_dir = os.path.join(root, dr)
1095 fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
1096 for f in files:
1097 each_file = os.path.join(root, f)
1098 fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
1099}
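# Editor's illustration: two hypothetical fs-perms.txt lines in the formats
# parsed by fixup_perms() above - a directory entry and a link entry:
#
#   /usr/share  0755  root  root  false  -  -  -
#   /var/run    link  /run
#
# The first sets mode and ownership on the directory itself ('false' disables
# walking its contents); the second replaces the directory with a symlink.
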
1100
1101def package_debug_vars(d):
1102 # We default to '.debug' style
1103 if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
1104 # Single debug-file-directory style debug info
1105 debug_vars = {
1106 "append": ".debug",
1107 "staticappend": "",
1108 "dir": "",
1109 "staticdir": "",
1110 "libdir": "/usr/lib/debug",
1111 "staticlibdir": "/usr/lib/debug-static",
1112 "srcdir": "/usr/src/debug",
1113 }
1114 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
1115 # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
1116 debug_vars = {
1117 "append": "",
1118 "staticappend": "",
1119 "dir": "/.debug",
1120 "staticdir": "/.debug-static",
1121 "libdir": "",
1122 "staticlibdir": "",
1123 "srcdir": "",
1124 }
1125 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
1126 debug_vars = {
1127 "append": "",
1128 "staticappend": "",
1129 "dir": "/.debug",
1130 "staticdir": "/.debug-static",
1131 "libdir": "",
1132 "staticlibdir": "",
1133 "srcdir": "/usr/src/debug",
1134 }
1135 else:
1136 # Original OE-core, a.k.a. ".debug", style debug info
1137 debug_vars = {
1138 "append": "",
1139 "staticappend": "",
1140 "dir": "/.debug",
1141 "staticdir": "/.debug-static",
1142 "libdir": "",
1143 "staticlibdir": "",
1144 "srcdir": "/usr/src/debug",
1145 }
1146
1147 return debug_vars
1148
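# Editor's sketch: where the variables above place split debug info for a
# hypothetical binary /usr/bin/foo, using the same path construction as
# splitdebuginfo():
def _example_debug_dest(dv, src="/usr/bin/foo"):
    return (dv["libdir"] + os.path.dirname(src) + dv["dir"]
            + "/" + os.path.basename(src) + dv["append"])
    # default style          -> "/usr/bin/.debug/foo"
    # 'debug-file-directory' -> "/usr/lib/debug/usr/bin/foo.debug"
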
1149python split_and_strip_files () {
1150 import stat, errno
1151 import subprocess
1152
1153 dvar = d.getVar('PKGD')
1154 pn = d.getVar('PN')
1155 hostos = d.getVar('HOST_OS')
1156
1157 oldcwd = os.getcwd()
1158 os.chdir(dvar)
1159
1160 dv = package_debug_vars(d)
1161
1162 #
1163    # First let's figure out all of the files we may have to process ... do this only once!
1164 #
1165 elffiles = {}
1166 symlinks = {}
1167 staticlibs = []
1168 inodes = {}
1169 libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
1170 baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
1171 skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
1172 if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
1173 d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
1174 checkelf = {}
1175 checkelflinks = {}
1176 for root, dirs, files in cpath.walk(dvar):
1177 for f in files:
1178 file = os.path.join(root, f)
1179
1180 # Skip debug files
1181 if dv["append"] and file.endswith(dv["append"]):
1182 continue
1183 if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
1184 continue
1185
1186 if file in skipfiles:
1187 continue
1188
1189 if oe.package.is_static_lib(file):
1190 staticlibs.append(file)
1191 continue
1192
1193 try:
1194 ltarget = cpath.realpath(file, dvar, False)
1195 s = cpath.lstat(ltarget)
1196 except OSError as e:
1197 (err, strerror) = e.args
1198 if err != errno.ENOENT:
1199 raise
1200 # Skip broken symlinks
1201 continue
1202 if not s:
1203 continue
1204                # Check if it's an executable
1205 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
1206 or (s[stat.ST_MODE] & stat.S_IXOTH) \
1207 or ((file.startswith(libdir) or file.startswith(baselibdir)) \
1208 and (".so" in f or ".node" in f)) \
1209 or (f.startswith('vmlinux') or ".ko" in f):
1210
1211 if cpath.islink(file):
1212 checkelflinks[file] = ltarget
1213 continue
1214 # Use a reference of device ID and inode number to identify files
1215 file_reference = "%d_%d" % (s.st_dev, s.st_ino)
1216 checkelf[file] = (file, file_reference)
1217
1218 results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
1219 results_map = {}
1220 for (ltarget, elf_file) in results:
1221 results_map[ltarget] = elf_file
1222 for file in checkelflinks:
1223 ltarget = checkelflinks[file]
1224 # If it's a symlink, and points to an ELF file, we capture the readlink target
1225 if results_map[ltarget]:
1226 target = os.readlink(file)
1227 #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
1228 symlinks[file] = target
1229
1230 results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
1231
1232 # Sort results by file path. This ensures that the files are always
1233 # processed in the same order, which is important to make sure builds
1234 # are reproducible when dealing with hardlinks
1235 results.sort(key=lambda x: x[0])
1236
1237 for (file, elf_file) in results:
1238 # It's a file (or hardlink), not a link
1239 # ...but is it ELF, and is it already stripped?
1240 if elf_file & 1:
1241 if elf_file & 2:
1242 if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
1243 bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
1244 else:
1245 msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
1246 oe.qa.handle_error("already-stripped", msg, d)
1247 continue
1248
1249 # At this point we have an unstripped elf file. We need to:
1250 # a) Make sure any file we strip is not hardlinked to anything else outside this tree
1251 # b) Only strip any hardlinked file once (no races)
1252 # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
1253
1254 # Use a reference of device ID and inode number to identify files
1255 file_reference = checkelf[file][1]
1256 if file_reference in inodes:
1257 os.unlink(file)
1258 os.link(inodes[file_reference][0], file)
1259 inodes[file_reference].append(file)
1260 else:
1261 inodes[file_reference] = [file]
1262 # break hardlink
1263 bb.utils.break_hardlinks(file)
1264 elffiles[file] = elf_file
1265                # We modified the file, so clear the cpath cache
1266 cpath.updatecache(file)
1267
1268 def strip_pkgd_prefix(f):
1269 nonlocal dvar
1270
1271 if f.startswith(dvar):
1272 return f[len(dvar):]
1273
1274 return f
1275
1276 #
1277    # First let's process debug splitting
1278 #
1279 if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
1280 results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
1281
1282 if dv["srcdir"] and not hostos.startswith("mingw"):
1283 if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
1284 results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
1285 else:
1286 for file in staticlibs:
1287                    results.append((file, source_info(file, d)))
1288
1289 d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
1290
1291 sources = set()
1292 for r in results:
1293 sources.update(r[1])
1294
1295 # Hardlink our debug symbols to the other hardlink copies
1296 for ref in inodes:
1297 if len(inodes[ref]) == 1:
1298 continue
1299
1300 target = inodes[ref][0][len(dvar):]
1301 for file in inodes[ref][1:]:
1302 src = file[len(dvar):]
1303 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
1304 fpath = dvar + dest
1305 ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
1306 bb.utils.mkdirhier(os.path.dirname(fpath))
1307 # Only one hardlink of separated debug info file in each directory
1308 if not os.access(fpath, os.R_OK):
1309 #bb.note("Link %s -> %s" % (fpath, ftarget))
1310 os.link(ftarget, fpath)
1311
1312 # Create symlinks for all cases we were able to split symbols
1313 for file in symlinks:
1314 src = file[len(dvar):]
1315 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
1316 fpath = dvar + dest
1317 # Skip it if the target doesn't exist
1318 try:
1319 s = os.stat(fpath)
1320 except OSError as e:
1321 (err, strerror) = e.args
1322 if err != errno.ENOENT:
1323 raise
1324 continue
1325
1326 ltarget = symlinks[file]
1327 lpath = os.path.dirname(ltarget)
1328 lbase = os.path.basename(ltarget)
1329 ftarget = ""
1330 if lpath and lpath != ".":
1331 ftarget += lpath + dv["dir"] + "/"
1332 ftarget += lbase + dv["append"]
1333 if lpath.startswith(".."):
1334 ftarget = os.path.join("..", ftarget)
1335 bb.utils.mkdirhier(os.path.dirname(fpath))
1336 #bb.note("Symlink %s -> %s" % (fpath, ftarget))
1337 os.symlink(ftarget, fpath)
1338
1339 # Process the dv["srcdir"] if requested...
1340 # This copies and places the referenced sources for later debugging...
1341 copydebugsources(dv["srcdir"], sources, d)
1342 #
1343 # End of debug splitting
1344 #
1345
1346 #
1347    # Now let's go back over things and strip them
1348 #
1349 if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
1350 strip = d.getVar("STRIP")
1351 sfiles = []
1352 for file in elffiles:
1353 elf_file = int(elffiles[file])
1354 #bb.note("Strip %s" % file)
1355 sfiles.append((file, elf_file, strip))
1356 if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
1357 for f in staticlibs:
1358 sfiles.append((f, 16, strip))
1359
1360 oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
1361
1362 # Build "minidebuginfo" and reinject it back into the stripped binaries
1363 if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
1364 oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
1365 extraargs=(dvar, dv, d))
1366
1367 #
1368 # End of strip
1369 #
1370 os.chdir(oldcwd)
1371}
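# Editor's note on the elf_file bit pattern used above: from the checks in
# this function, bit 1 marks a recognized ELF file and bit 2 marks it as
# already stripped; static archives are queued with the literal value 16,
# which selects a symbol-preserving (--strip-debug style) strip in
# oe.package.runstrip rather than a full strip.
def _example_elf_flags(elf_file):
    return bool(elf_file & 1), bool(elf_file & 2)  # (is ELF, already stripped)
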
1372
1373python populate_packages () {
1374 import glob, re
1375
1376 workdir = d.getVar('WORKDIR')
1377 outdir = d.getVar('DEPLOY_DIR')
1378 dvar = d.getVar('PKGD')
1379 packages = d.getVar('PACKAGES').split()
1380 pn = d.getVar('PN')
1381
1382 bb.utils.mkdirhier(outdir)
1383 os.chdir(dvar)
1384
1385 autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
1386
1387 split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
1388
1389    # If debug-with-srcpkg mode is enabled, create the source package if it
1390    # doesn't already exist and assign the source file contents to it.
1391 if split_source_package:
1392 src_package_name = ('%s-src' % d.getVar('PN'))
1393        if src_package_name not in packages:
1394 packages.append(src_package_name)
1395 d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
1396
1397 # Sanity check PACKAGES for duplicates
1398 # Sanity should be moved to sanity.bbclass once we have the infrastructure
1399 package_dict = {}
1400
1401 for i, pkg in enumerate(packages):
1402 if pkg in package_dict:
1403 msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
1404 oe.qa.handle_error("packages-list", msg, d)
1405 # Ensure the source package gets the chance to pick up the source files
1406 # before the debug package by ordering it first in PACKAGES. Whether it
1407 # actually picks up any source files is controlled by
1408 # PACKAGE_DEBUG_SPLIT_STYLE.
1409 elif pkg.endswith("-src"):
1410 package_dict[pkg] = (10, i)
1411 elif autodebug and pkg.endswith("-dbg"):
1412 package_dict[pkg] = (30, i)
1413 else:
1414 package_dict[pkg] = (50, i)
1415 packages = sorted(package_dict.keys(), key=package_dict.get)
1416 d.setVar('PACKAGES', ' '.join(packages))
1417 pkgdest = d.getVar('PKGDEST')
1418
1419 seen = []
1420
1421 # os.mkdir masks the permissions with umask so we have to unset it first
1422 oldumask = os.umask(0)
1423
1424 debug = []
1425 for root, dirs, files in cpath.walk(dvar):
1426 dir = root[len(dvar):]
1427 if not dir:
1428 dir = os.sep
1429 for f in (files + dirs):
1430 path = "." + os.path.join(dir, f)
1431 if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
1432 debug.append(path)
1433
1434 for pkg in packages:
1435 root = os.path.join(pkgdest, pkg)
1436 bb.utils.mkdirhier(root)
1437
1438 filesvar = d.getVar('FILES:%s' % pkg) or ""
1439 if "//" in filesvar:
1440 msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
1441 oe.qa.handle_error("files-invalid", msg, d)
1442            filesvar = filesvar.replace("//", "/")
1443
1444 origfiles = filesvar.split()
1445 files, symlink_paths = files_from_filevars(origfiles)
1446
1447 if autodebug and pkg.endswith("-dbg"):
1448 files.extend(debug)
1449
1450 for file in files:
1451 if (not cpath.islink(file)) and (not cpath.exists(file)):
1452 continue
1453 if file in seen:
1454 continue
1455 seen.append(file)
1456
1457 def mkdir(src, dest, p):
1458 src = os.path.join(src, p)
1459 dest = os.path.join(dest, p)
1460 fstat = cpath.stat(src)
1461 os.mkdir(dest)
1462 os.chmod(dest, fstat.st_mode)
1463 os.chown(dest, fstat.st_uid, fstat.st_gid)
1464 if p not in seen:
1465 seen.append(p)
1466 cpath.updatecache(dest)
1467
1468 def mkdir_recurse(src, dest, paths):
1469 if cpath.exists(dest + '/' + paths):
1470 return
1471 while paths.startswith("./"):
1472 paths = paths[2:]
1473 p = "."
1474 for c in paths.split("/"):
1475 p = os.path.join(p, c)
1476 if not cpath.exists(os.path.join(dest, p)):
1477 mkdir(src, dest, p)
1478
1479 if cpath.isdir(file) and not cpath.islink(file):
1480 mkdir_recurse(dvar, root, file)
1481 continue
1482
1483 mkdir_recurse(dvar, root, os.path.dirname(file))
1484 fpath = os.path.join(root,file)
1485 if not cpath.islink(file):
1486 os.link(file, fpath)
1487 continue
1488 ret = bb.utils.copyfile(file, fpath)
1489 if ret is False or ret == 0:
1490 bb.fatal("File population failed")
1491
1492 # Check if symlink paths exist
1493 for file in symlink_paths:
1494 if not os.path.exists(os.path.join(root,file)):
1495 bb.fatal("File '%s' cannot be packaged into '%s' because its "
1496 "parent directory structure does not exist. One of "
1497 "its parent directories is a symlink whose target "
1498 "directory is not included in the package." %
1499 (file, pkg))
1500
1501 os.umask(oldumask)
1502 os.chdir(workdir)
1503
1504 # Handle excluding packages with incompatible licenses
1505 package_list = []
1506 for pkg in packages:
1507 licenses = d.getVar('_exclude_incompatible-' + pkg)
1508 if licenses:
1509 msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
1510 oe.qa.handle_error("incompatible-license", msg, d)
1511 else:
1512 package_list.append(pkg)
1513 d.setVar('PACKAGES', ' '.join(package_list))
1514
1515 unshipped = []
1516 for root, dirs, files in cpath.walk(dvar):
1517 dir = root[len(dvar):]
1518 if not dir:
1519 dir = os.sep
1520 for f in (files + dirs):
1521 path = os.path.join(dir, f)
1522 if ('.' + path) not in seen:
1523 unshipped.append(path)
1524
1525 if unshipped != []:
1526 msg = pn + ": Files/directories were installed but not shipped in any package:"
1527 if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
1528 bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
1529 else:
1530 for f in unshipped:
1531 msg = msg + "\n " + f
1532 msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
1533 msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
1534 oe.qa.handle_error("installed-vs-shipped", msg, d)
1535}
1536populate_packages[dirs] = "${D}"
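# Editor's sketch: the (priority, original index) sort in populate_packages()
# keeps PACKAGES order stable while hoisting -src first and -dbg next, so the
# source package can claim files before the debug package walks the tree:
def _example_package_ordering():
    packages = ["foo", "foo-dbg", "foo-src", "foo-doc"]   # hypothetical
    package_dict = {}
    for i, pkg in enumerate(packages):
        if pkg.endswith("-src"):
            package_dict[pkg] = (10, i)
        elif pkg.endswith("-dbg"):
            package_dict[pkg] = (30, i)
        else:
            package_dict[pkg] = (50, i)
    return sorted(package_dict.keys(), key=package_dict.get)
    # -> ['foo-src', 'foo-dbg', 'foo', 'foo-doc']
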
1537
1538python package_fixsymlinks () {
1539 import errno
1540 pkgdest = d.getVar('PKGDEST')
1541 packages = d.getVar("PACKAGES", False).split()
1542
1543 dangling_links = {}
1544 pkg_files = {}
1545 for pkg in packages:
1546 dangling_links[pkg] = []
1547 pkg_files[pkg] = []
1548 inst_root = os.path.join(pkgdest, pkg)
1549 for path in pkgfiles[pkg]:
1550 rpath = path[len(inst_root):]
1551 pkg_files[pkg].append(rpath)
1552            rtarget = cpath.realpath(path, inst_root, True, assume_dir=True)
1553 if not cpath.lexists(rtarget):
1554 dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
1555
1556 newrdepends = {}
1557 for pkg in dangling_links:
1558 for l in dangling_links[pkg]:
1559 found = False
1560 bb.debug(1, "%s contains dangling link %s" % (pkg, l))
1561 for p in packages:
1562 if l in pkg_files[p]:
1563 found = True
1564 bb.debug(1, "target found in %s" % p)
1565 if p == pkg:
1566 break
1567 if pkg not in newrdepends:
1568 newrdepends[pkg] = []
1569 newrdepends[pkg].append(p)
1570 break
1571            if not found:
1572 bb.note("%s contains dangling symlink to %s" % (pkg, l))
1573
1574 for pkg in newrdepends:
1575 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
1576 for p in newrdepends[pkg]:
1577 if p not in rdepends:
1578 rdepends[p] = []
1579 d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1580}
1581
1582
1583python package_package_name_hook() {
1584 """
1585 A package_name_hook function can be used to rewrite the package names by
1586 changing PKG. For an example, see debian.bbclass.
1587 """
1588 pass
1589}
1590
1591EXPORT_FUNCTIONS package_name_hook
1592
1593
1594PKGDESTWORK = "${WORKDIR}/pkgdata"
1595
1596PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
1597
1598python emit_pkgdata() {
1599 from glob import glob
1600 import json
1601 import bb.compress.zstd
1602
1603 def process_postinst_on_target(pkg, mlprefix):
1604 pkgval = d.getVar('PKG:%s' % pkg)
1605 if pkgval is None:
1606 pkgval = pkg
1607
1608 defer_fragment = """
1609if [ -n "$D" ]; then
1610 $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
1611 exit 0
1612fi
1613""" % (pkgval, mlprefix)
1614
1615 postinst = d.getVar('pkg_postinst:%s' % pkg)
1616 postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
1617
1618 if postinst_ontarget:
1619 bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
1620 if not postinst:
1621 postinst = '#!/bin/sh\n'
1622 postinst += defer_fragment
1623 postinst += postinst_ontarget
1624 d.setVar('pkg_postinst:%s' % pkg, postinst)
1625
1626 def add_set_e_to_scriptlets(pkg):
1627 for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
1628 scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
1629 if scriptlet:
1630 scriptlet_split = scriptlet.split('\n')
1631 if scriptlet_split[0].startswith("#!"):
1632 scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
1633 else:
1634 scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
1635 d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
1636
1637 def write_if_exists(f, pkg, var):
1638 def encode(str):
1639 import codecs
1640 c = codecs.getencoder("unicode_escape")
1641 return c(str)[0].decode("latin1")
1642
1643 val = d.getVar('%s:%s' % (var, pkg))
1644 if val:
1645 f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
1646 return val
1647 val = d.getVar('%s' % (var))
1648 if val:
1649 f.write('%s: %s\n' % (var, encode(val)))
1650 return val
1651
1652 def write_extra_pkgs(variants, pn, packages, pkgdatadir):
1653 for variant in variants:
1654 with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
1655 fd.write("PACKAGES: %s\n" % ' '.join(
1656 map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
1657
1658 def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
1659 for variant in variants:
1660 for pkg in packages.split():
1661 ml_pkg = "%s-%s" % (variant, pkg)
1662 subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
1663 with open(subdata_file, 'w') as fd:
1664 fd.write("PKG:%s: %s" % (ml_pkg, pkg))
1665
1666 packages = d.getVar('PACKAGES')
1667 pkgdest = d.getVar('PKGDEST')
1668 pkgdatadir = d.getVar('PKGDESTWORK')
1669
1670 data_file = pkgdatadir + d.expand("/${PN}")
1671 with open(data_file, 'w') as fd:
1672 fd.write("PACKAGES: %s\n" % packages)
1673
1674 pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
1675
1676 pn = d.getVar('PN')
1677 global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
1678 variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
1679
1680 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1681 write_extra_pkgs(variants, pn, packages, pkgdatadir)
1682
1683 if bb.data.inherits_class('allarch', d) and not variants \
1684 and not bb.data.inherits_class('packagegroup', d):
1685 write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
1686
1687 workdir = d.getVar('WORKDIR')
1688
1689 for pkg in packages.split():
1690 pkgval = d.getVar('PKG:%s' % pkg)
1691 if pkgval is None:
1692 pkgval = pkg
1693 d.setVar('PKG:%s' % pkg, pkg)
1694
1695 extended_data = {
1696 "files_info": {}
1697 }
1698
1699 pkgdestpkg = os.path.join(pkgdest, pkg)
1700 files = {}
1701 files_extra = {}
1702 total_size = 0
1703 seen = set()
1704 for f in pkgfiles[pkg]:
1705 fpath = os.sep + os.path.relpath(f, pkgdestpkg)
1706
1707 fstat = os.lstat(f)
1708 files[fpath] = fstat.st_size
1709
1710 extended_data["files_info"].setdefault(fpath, {})
1711 extended_data["files_info"][fpath]['size'] = fstat.st_size
1712
1713 if fstat.st_ino not in seen:
1714 seen.add(fstat.st_ino)
1715 total_size += fstat.st_size
1716
1717 if fpath in pkgdebugsource:
1718 extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
1719 del pkgdebugsource[fpath]
1720
1721        d.setVar('FILES_INFO:' + pkg, json.dumps(files, sort_keys=True))
1722
1723 process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
1724 add_set_e_to_scriptlets(pkg)
1725
1726 subdata_file = pkgdatadir + "/runtime/%s" % pkg
1727 with open(subdata_file, 'w') as sf:
1728 for var in (d.getVar('PKGDATA_VARS') or "").split():
1729 val = write_if_exists(sf, pkg, var)
1730
1731 write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
1732 for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
1733 write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
1734
1735 write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
1736 for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
1737 write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
1738
1739 sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
1740
1741 subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
1742 num_threads = int(d.getVar("BB_NUMBER_THREADS"))
1743 with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
1744 json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
1745
1746 # Symlinks needed for rprovides lookup
1747 rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
1748 if rprov:
1749 for p in bb.utils.explode_deps(rprov):
1750 subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
1751 bb.utils.mkdirhier(os.path.dirname(subdata_sym))
1752 oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
1753
1754 allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
1755 if not allow_empty:
1756 allow_empty = d.getVar('ALLOW_EMPTY')
1757 root = "%s/%s" % (pkgdest, pkg)
1758 os.chdir(root)
1759 g = glob('*')
1760 if g or allow_empty == "1":
1761 # Symlinks needed for reverse lookups (from the final package name)
1762 subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
1763 oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
1764
1765 packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
1766 open(packagedfile, 'w').close()
1767
1768 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1769 write_extra_runtime_pkgs(variants, packages, pkgdatadir)
1770
1771 if bb.data.inherits_class('allarch', d) and not variants \
1772 and not bb.data.inherits_class('packagegroup', d):
1773 write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
1774
1775}
1776emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
1777emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
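# Editor's illustration: a hypothetical ${PKGDESTWORK}/runtime/foo file as
# written by write_if_exists() above - package-specific variables use the
# "VAR:pkg: value" form, globals fall back to "VAR: value", and the computed
# PKGSIZE is appended last:
#
#   PN: foo
#   PV: 1.0
#   PKG:foo: foo
#   FILES_INFO:foo: {"/usr/bin/foo": 13248}
#   PKGSIZE:foo: 13248
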
1778
1779ldconfig_postinst_fragment() {
1780if [ x"$D" = "x" ]; then
1781 if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
1782fi
1783}
1784
1785RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
1786
1787# Collect perfile run-time dependency metadata
1788# Output:
1789# FILERPROVIDESFLIST:pkg - list of all files w/ deps
1790# FILERPROVIDES:filepath:pkg - per file dep
1791#
1792# FILERDEPENDSFLIST:pkg - list of all files w/ deps
1793# FILERDEPENDS:filepath:pkg - per file dep
1794
1795python package_do_filedeps() {
1796 if d.getVar('SKIP_FILEDEPS') == '1':
1797 return
1798
1799 pkgdest = d.getVar('PKGDEST')
1800 packages = d.getVar('PACKAGES')
1801 rpmdeps = d.getVar('RPMDEPS')
1802
1803 def chunks(files, n):
1804 return [files[i:i+n] for i in range(0, len(files), n)]
1805
1806 pkglist = []
1807 for pkg in packages.split():
1808 if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
1809 continue
1810 if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
1811 continue
1812 for files in chunks(pkgfiles[pkg], 100):
1813 pkglist.append((pkg, files, rpmdeps, pkgdest))
1814
1815 processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
1816
1817 provides_files = {}
1818 requires_files = {}
1819
1820 for result in processed:
1821 (pkg, provides, requires) = result
1822
1823 if pkg not in provides_files:
1824 provides_files[pkg] = []
1825 if pkg not in requires_files:
1826 requires_files[pkg] = []
1827
1828 for file in sorted(provides):
1829 provides_files[pkg].append(file)
1830 key = "FILERPROVIDES:" + file + ":" + pkg
1831 d.appendVar(key, " " + " ".join(provides[file]))
1832
1833 for file in sorted(requires):
1834 requires_files[pkg].append(file)
1835 key = "FILERDEPENDS:" + file + ":" + pkg
1836 d.appendVar(key, " " + " ".join(requires[file]))
1837
1838 for pkg in requires_files:
1839 d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
1840 for pkg in provides_files:
1841 d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
1842}
1843
1844SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
1845SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
1846
1847python package_do_shlibs() {
1848 import itertools
1849 import re, pipes
1850 import subprocess
1851
1852 exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
1853 if exclude_shlibs:
1854 bb.note("not generating shlibs")
1855 return
1856
1857 lib_re = re.compile(r"^.*\.so")
1858 libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
1859
1860 packages = d.getVar('PACKAGES')
1861
1862 shlib_pkgs = []
1863 exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
1864 if exclusion_list:
1865 for pkg in packages.split():
1866 if pkg not in exclusion_list.split():
1867 shlib_pkgs.append(pkg)
1868 else:
1869 bb.note("not generating shlibs for %s" % pkg)
1870 else:
1871 shlib_pkgs = packages.split()
1872
1873 hostos = d.getVar('HOST_OS')
1874
1875 workdir = d.getVar('WORKDIR')
1876
1877 ver = d.getVar('PKGV')
1878 if not ver:
1879 msg = "PKGV not defined"
1880 oe.qa.handle_error("pkgv-undefined", msg, d)
1881 return
1882
1883 pkgdest = d.getVar('PKGDEST')
1884
1885 shlibswork_dir = d.getVar('SHLIBSWORKDIR')
1886
1887 def linux_so(file, pkg, pkgver, d):
1888 needs_ldconfig = False
1889 needed = set()
1890 sonames = set()
1891 renames = []
1892 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1893 cmd = d.getVar('OBJDUMP') + " -p " + pipes.quote(file) + " 2>/dev/null"
1894 fd = os.popen(cmd)
1895 lines = fd.readlines()
1896 fd.close()
1897 rpath = tuple()
1898 for l in lines:
1899 m = re.match(r"\s+RPATH\s+([^\s]*)", l)
1900 if m:
1901 rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
1902 rpath = tuple(map(os.path.normpath, rpaths))
1903 for l in lines:
1904 m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
1905 if m:
1906 dep = m.group(1)
1907 if dep not in needed:
1908 needed.add((dep, file, rpath))
1909 m = re.match(r"\s+SONAME\s+([^\s]*)", l)
1910 if m:
1911 this_soname = m.group(1)
1912 prov = (this_soname, ldir, pkgver)
1913                if prov not in sonames:
1914 # if library is private (only used by package) then do not build shlib for it
1915 import fnmatch
1916                    if not private_libs or not any(fnmatch.fnmatch(this_soname, i) for i in private_libs):
1917 sonames.add(prov)
1918 if libdir_re.match(os.path.dirname(file)):
1919 needs_ldconfig = True
1920 if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
1921 renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
1922 return (needs_ldconfig, needed, sonames, renames)
1923
1924 def darwin_so(file, needed, sonames, renames, pkgver):
1925 if not os.path.exists(file):
1926 return
1927 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1928
1929 def get_combinations(base):
1930 #
1931 # Given a base library name, find all combinations of this split by "." and "-"
1932 #
1933 combos = []
1934 options = base.split(".")
1935 for i in range(1, len(options) + 1):
1936 combos.append(".".join(options[0:i]))
1937 options = base.split("-")
1938 for i in range(1, len(options) + 1):
1939 combos.append("-".join(options[0:i]))
1940 return combos
1941
1942 if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
1943 # Drop suffix
1944 name = os.path.basename(file).rsplit(".",1)[0]
1945 # Find all combinations
1946 combos = get_combinations(name)
1947 for combo in combos:
1948                if combo not in sonames:
1949 prov = (combo, ldir, pkgver)
1950 sonames.add(prov)
1951 if file.endswith('.dylib') or file.endswith('.so'):
1952 rpath = []
1953 p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1954 out, err = p.communicate()
1955 # If returned successfully, process stdout for results
1956 if p.returncode == 0:
1957                for l in out.decode().split("\n"):
1958 l = l.strip()
1959 if l.startswith('path '):
1960 rpath.append(l.split()[1])
1961
1962 p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1963 out, err = p.communicate()
1964 # If returned successfully, process stdout for results
1965 if p.returncode == 0:
1966                for l in out.decode().split("\n"):
1967 l = l.strip()
1968 if not l or l.endswith(":"):
1969 continue
1970 if "is not an object file" in l:
1971 continue
1972 name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
1973                    if name and (name, file, tuple()) not in needed[pkg]:
1974 needed[pkg].add((name, file, tuple()))
1975
1976 def mingw_dll(file, needed, sonames, renames, pkgver):
1977 if not os.path.exists(file):
1978 return
1979
1980 if file.endswith(".dll"):
1981 # assume all dlls are shared objects provided by the package
1982 sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
1983
1984 if (file.endswith(".dll") or file.endswith(".exe")):
1985 # use objdump to search for "DLL Name: .*\.dll"
1986 p = subprocess.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1987 out, err = p.communicate()
1988 # process the output, grabbing all .dll names
1989 if p.returncode == 0:
1990 for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
1991 dllname = m.group(1)
1992 if dllname:
1993 needed[pkg].add((dllname, file, tuple()))
1994
1995 if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
1996 snap_symlinks = True
1997 else:
1998 snap_symlinks = False
1999
2000 needed = {}
2001
2002 shlib_provider = oe.package.read_shlib_providers(d)
2003
2004 for pkg in shlib_pkgs:
2005 private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
2006 private_libs = private_libs.split()
2007 needs_ldconfig = False
2008 bb.debug(2, "calculating shlib provides for %s" % pkg)
2009
2010 pkgver = d.getVar('PKGV:' + pkg)
2011 if not pkgver:
2012            pkgver = d.getVar('PV:' + pkg)
2013 if not pkgver:
2014 pkgver = ver
2015
2016 needed[pkg] = set()
2017 sonames = set()
2018 renames = []
2019 linuxlist = []
2020 for file in pkgfiles[pkg]:
2021 soname = None
2022 if cpath.islink(file):
2023 continue
2024 if hostos == "darwin" or hostos == "darwin8":
2025 darwin_so(file, needed, sonames, renames, pkgver)
2026 elif hostos.startswith("mingw"):
2027 mingw_dll(file, needed, sonames, renames, pkgver)
2028 elif os.access(file, os.X_OK) or lib_re.match(file):
2029 linuxlist.append(file)
2030
2031 if linuxlist:
2032 results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
2033 for r in results:
2034 ldconfig = r[0]
2035 needed[pkg] |= r[1]
2036 sonames |= r[2]
2037 renames.extend(r[3])
2038 needs_ldconfig = needs_ldconfig or ldconfig
2039
2040 for (old, new) in renames:
2041 bb.note("Renaming %s to %s" % (old, new))
2042 bb.utils.rename(old, new)
2043 pkgfiles[pkg].remove(old)
2044
2045 shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
2046 if len(sonames):
2047 with open(shlibs_file, 'w') as fd:
2048 for s in sorted(sonames):
2049 if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
2050 (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
2051 if old_pkg != pkg:
2052 bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
2053 bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
2054 fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
2055 if s[0] not in shlib_provider:
2056 shlib_provider[s[0]] = {}
2057 shlib_provider[s[0]][s[1]] = (pkg, pkgver)
2058 if needs_ldconfig:
2059 bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
2060 postinst = d.getVar('pkg_postinst:%s' % pkg)
2061 if not postinst:
2062 postinst = '#!/bin/sh\n'
2063 postinst += d.getVar('ldconfig_postinst_fragment')
2064 d.setVar('pkg_postinst:%s' % pkg, postinst)
2065 bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
2066
2067 assumed_libs = d.getVar('ASSUME_SHLIBS')
2068 if assumed_libs:
2069 libdir = d.getVar("libdir")
2070 for e in assumed_libs.split():
2071 l, dep_pkg = e.split(":")
2072 lib_ver = None
2073 dep_pkg = dep_pkg.rsplit("_", 1)
2074 if len(dep_pkg) == 2:
2075 lib_ver = dep_pkg[1]
2076 dep_pkg = dep_pkg[0]
2077 if l not in shlib_provider:
2078 shlib_provider[l] = {}
2079 shlib_provider[l][libdir] = (dep_pkg, lib_ver)
2080
2081 libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
2082
2083 for pkg in shlib_pkgs:
2084 bb.debug(2, "calculating shlib requirements for %s" % pkg)
2085
2086 private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
2087 private_libs = private_libs.split()
2088
2089 deps = list()
2090 for n in needed[pkg]:
2091 # if n is in private libraries, don't try to search provider for it
2092 # this could cause problem in case some abc.bb provides private
2093 # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1
2094 # but skipping it is still better alternative than providing own
2095 # version and then adding runtime dependency for the same system library
2096 import fnmatch
2097            if private_libs and any(fnmatch.fnmatch(n[0], i) for i in private_libs):
2098 bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
2099 continue
2100 if n[0] in shlib_provider.keys():
2101 shlib_provider_map = shlib_provider[n[0]]
2102 matches = set()
2103 for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
2104 if p in shlib_provider_map:
2105 matches.add(p)
2106 if len(matches) > 1:
2107 matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
2108 bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
2109 elif len(matches) == 1:
2110 (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
2111
2112 bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
2113
2114 if dep_pkg == pkg:
2115 continue
2116
2117 if ver_needed:
2118 dep = "%s (>= %s)" % (dep_pkg, ver_needed)
2119 else:
2120 dep = dep_pkg
2121                if dep not in deps:
2122 deps.append(dep)
2123 continue
2124 bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
2125
2126 deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
2127 if os.path.exists(deps_file):
2128 os.remove(deps_file)
2129 if deps:
2130 with open(deps_file, 'w') as fd:
2131 for dep in sorted(deps):
2132 fd.write(dep + '\n')
2133}
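
# A small, self-contained sketch (not part of the class) of how the ASSUME_SHLIBS
# parsing above seeds shlib_provider; the entries and libdir below are
# hypothetical examples.
def _assume_shlibs_sketch(assumed_libs, libdir="/usr/lib"):
    shlib_provider = {}
    for e in assumed_libs.split():
        # Each entry is "<soname>:<package>" with an optional "_<version>".
        l, dep_pkg = e.split(":")
        parts = dep_pkg.rsplit("_", 1)
        dep_pkg = parts[0]
        lib_ver = parts[1] if len(parts) == 2 else None
        shlib_provider.setdefault(l, {})[libdir] = (dep_pkg, lib_ver)
    return shlib_provider

# _assume_shlibs_sketch("libEGL.so.1:libegl-implementation libz.so.1:zlib_1.2")
# -> {'libEGL.so.1': {'/usr/lib': ('libegl-implementation', None)},
#     'libz.so.1': {'/usr/lib': ('zlib', '1.2')}}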
2134
2135python package_do_pkgconfig () {
2136 import re
2137
2138 packages = d.getVar('PACKAGES')
2139 workdir = d.getVar('WORKDIR')
2140 pkgdest = d.getVar('PKGDEST')
2141
2142 shlibs_dirs = d.getVar('SHLIBSDIRS').split()
2143 shlibswork_dir = d.getVar('SHLIBSWORKDIR')
2144
2145 pc_re = re.compile(r'(.*)\.pc$')
2146 var_re = re.compile(r'(.*)=(.*)')
2147 field_re = re.compile(r'(.*): (.*)')
2148
2149 pkgconfig_provided = {}
2150 pkgconfig_needed = {}
2151 for pkg in packages.split():
2152 pkgconfig_provided[pkg] = []
2153 pkgconfig_needed[pkg] = []
2154 for file in sorted(pkgfiles[pkg]):
2155 m = pc_re.match(file)
2156 if m:
2157 pd = bb.data.init()
2158 name = m.group(1)
2159 pkgconfig_provided[pkg].append(os.path.basename(name))
2160 if not os.access(file, os.R_OK):
2161 continue
2162 with open(file, 'r') as f:
2163 lines = f.readlines()
2164 for l in lines:
2165 m = var_re.match(l)
2166 if m:
2167 name = m.group(1)
2168 val = m.group(2)
2169 pd.setVar(name, pd.expand(val))
2170 continue
2171 m = field_re.match(l)
2172 if m:
2173 hdr = m.group(1)
2174 exp = pd.expand(m.group(2))
2175 if hdr == 'Requires':
2176 pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
2177
2178 for pkg in packages.split():
2179 pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
2180 if pkgconfig_provided[pkg] != []:
2181 with open(pkgs_file, 'w') as f:
2182 for p in sorted(pkgconfig_provided[pkg]):
2183 f.write('%s\n' % p)
2184
2185 # Go from least to most specific since the last one found wins
2186 for dir in reversed(shlibs_dirs):
2187 if not os.path.exists(dir):
2188 continue
2189 for file in sorted(os.listdir(dir)):
2190 m = re.match(r'^(.*)\.pclist$', file)
2191 if m:
2192 pkg = m.group(1)
2193 with open(os.path.join(dir, file)) as fd:
2194 lines = fd.readlines()
2195 pkgconfig_provided[pkg] = []
2196 for l in lines:
2197 pkgconfig_provided[pkg].append(l.rstrip())
2198
2199 for pkg in packages.split():
2200 deps = []
2201 for n in pkgconfig_needed[pkg]:
2202 found = False
2203 for k in pkgconfig_provided.keys():
2204 if n in pkgconfig_provided[k]:
2205 if k != pkg and not (k in deps):
2206 deps.append(k)
2207 found = True
2208            if not found:
2209 bb.note("couldn't find pkgconfig module '%s' in any package" % n)
2210 deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
2211 if len(deps):
2212 with open(deps_file, 'w') as fd:
2213 for dep in deps:
2214 fd.write(dep + '\n')
2215}
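
# Hedged, standalone sketch of the .pc scanning above (expansion of ${var}
# references via bb.data is elided; the input lines are hypothetical):
import re

def _parse_pc_requires_sketch(lines):
    var_re = re.compile(r'(.*)=(.*)')
    field_re = re.compile(r'(.*): (.*)')
    requires = []
    for l in lines:
        if var_re.match(l):
            continue  # variable definitions only matter for expansion
        m = field_re.match(l)
        if m and m.group(1) == 'Requires':
            requires += m.group(2).replace(',', ' ').split()
    return requires

# _parse_pc_requires_sketch(["prefix=/usr", "Requires: glib-2.0, gobject-2.0"])
# -> ['glib-2.0', 'gobject-2.0']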
2216
2217def read_libdep_files(d):
2218 pkglibdeps = {}
2219 packages = d.getVar('PACKAGES').split()
2220 for pkg in packages:
2221 pkglibdeps[pkg] = {}
2222 for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
2223 depsfile = d.expand("${PKGDEST}/" + pkg + extension)
2224 if os.access(depsfile, os.R_OK):
2225 with open(depsfile) as fd:
2226 lines = fd.readlines()
2227 for l in lines:
2228                    l = l.rstrip()
2229 deps = bb.utils.explode_dep_versions2(l)
2230 for dep in deps:
2231 if not dep in pkglibdeps[pkg]:
2232 pkglibdeps[pkg][dep] = deps[dep]
2233 return pkglibdeps
2234
2235python read_shlibdeps () {
2236 pkglibdeps = read_libdep_files(d)
2237
2238 packages = d.getVar('PACKAGES').split()
2239 for pkg in packages:
2240 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
2241 for dep in sorted(pkglibdeps[pkg]):
2242 # Add the dep if it's not already there, or if no comparison is set
2243 if dep not in rdepends:
2244 rdepends[dep] = []
2245 for v in pkglibdeps[pkg][dep]:
2246 if v not in rdepends[dep]:
2247 rdepends[dep].append(v)
2248 d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
2249}
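
# Plain-dict sketch (bb.utils.explode_dep_versions2/join_deps elided) of the
# merge read_shlibdeps performs: each discovered dependency is added to
# RDEPENDS unless already present, and its version constraints are unioned.
def _merge_rdepends_sketch(rdepends, libdeps):
    for dep in sorted(libdeps):
        rdepends.setdefault(dep, [])
        for v in libdeps[dep]:
            if v not in rdepends[dep]:
                rdepends[dep].append(v)
    return rdepends

# _merge_rdepends_sketch({"busybox": []}, {"libz1": [">= 1.2"]})
# -> {'busybox': [], 'libz1': ['>= 1.2']}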
2250
2251python package_depchains() {
2252 """
2253 For a given set of prefix and postfix modifiers, make those packages
2254 RRECOMMENDS on the corresponding packages for its RDEPENDS.
2255
2256 Example: If package A depends upon package B, and A's .bb emits an
2257 A-dev package, this would make A-dev Recommends: B-dev.
2258
2259    If only one package with a given suffix is specified, it will base the
2260    RRECOMMENDS on the RDEPENDS of *all* packages. If more than one package
2261    with a given suffix is specified, each will only use the RDEPENDS of its
2262    single parent package.
2263 """
2264
2265 packages = d.getVar('PACKAGES')
2266 postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
2267 prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
2268
2269 def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
2270
2271 #bb.note('depends for %s is %s' % (base, depends))
2272 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
2273
2274 for depend in sorted(depends):
2275 if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
2276 #bb.note("Skipping %s" % depend)
2277 continue
2278 if depend.endswith('-dev'):
2279 depend = depend[:-4]
2280 if depend.endswith('-dbg'):
2281 depend = depend[:-4]
2282 pkgname = getname(depend, suffix)
2283 #bb.note("Adding %s for %s" % (pkgname, depend))
2284 if pkgname not in rreclist and pkgname != pkg:
2285 rreclist[pkgname] = []
2286
2287 #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
2288 d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
2289
2290 def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
2291
2292 #bb.note('rdepends for %s is %s' % (base, rdepends))
2293 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
2294
2295 for depend in sorted(rdepends):
2296 if depend.find('virtual-locale-') != -1:
2297 #bb.note("Skipping %s" % depend)
2298 continue
2299 if depend.endswith('-dev'):
2300 depend = depend[:-4]
2301 if depend.endswith('-dbg'):
2302 depend = depend[:-4]
2303 pkgname = getname(depend, suffix)
2304 #bb.note("Adding %s for %s" % (pkgname, depend))
2305 if pkgname not in rreclist and pkgname != pkg:
2306 rreclist[pkgname] = []
2307
2308 #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
2309 d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
2310
2311 def add_dep(list, dep):
2312 if dep not in list:
2313 list.append(dep)
2314
2315 depends = []
2316 for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
2317 add_dep(depends, dep)
2318
2319 rdepends = []
2320 for pkg in packages.split():
2321 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
2322 add_dep(rdepends, dep)
2323
2324 #bb.note('rdepends is %s' % rdepends)
2325
2326 def post_getname(name, suffix):
2327 return '%s%s' % (name, suffix)
2328 def pre_getname(name, suffix):
2329 return '%s%s' % (suffix, name)
2330
2331 pkgs = {}
2332 for pkg in packages.split():
2333 for postfix in postfixes:
2334 if pkg.endswith(postfix):
2335 if not postfix in pkgs:
2336 pkgs[postfix] = {}
2337 pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
2338
2339 for prefix in prefixes:
2340 if pkg.startswith(prefix):
2341 if not prefix in pkgs:
2342 pkgs[prefix] = {}
2343 pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
2344
2345 if "-dbg" in pkgs:
2346 pkglibdeps = read_libdep_files(d)
2347 pkglibdeplist = []
2348 for pkg in pkglibdeps:
2349 for k in pkglibdeps[pkg]:
2350 add_dep(pkglibdeplist, k)
2351 dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
2352
2353 for suffix in pkgs:
2354 for pkg in pkgs[suffix]:
2355 if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
2356 continue
2357 (base, func) = pkgs[suffix][pkg]
2358 if suffix == "-dev":
2359 pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
2360 elif suffix == "-dbg":
2361 if not dbgdefaultdeps:
2362 pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
2363 continue
2364 if len(pkgs[suffix]) == 1:
2365 pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
2366 else:
2367 rdeps = []
2368 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
2369 add_dep(rdeps, dep)
2370 pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
2371}
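
# Tiny illustration (hypothetical names) of the suffix mapping used above:
# with DEPCHAIN_POST containing "-dev", a package "foo-dev" whose base "foo"
# depends on "bar" gains an RRECOMMENDS on "bar-dev".
def _depchain_name_sketch(depend, suffix):
    # Mirrors pkg_addrrecs()/post_getname(): strip -dev/-dbg, then re-suffix.
    if depend.endswith('-dev'):
        depend = depend[:-4]
    if depend.endswith('-dbg'):
        depend = depend[:-4]
    return '%s%s' % (depend, suffix)

# _depchain_name_sketch("bar", "-dev")     -> "bar-dev"
# _depchain_name_sketch("bar-dbg", "-dev") -> "bar-dev"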
2372
2373# Since bitbake can't determine which variables are accessed during package
2374# iteration, we need to list them here:
2375PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
2376
2377def gen_packagevar(d, pkgvars="PACKAGEVARS"):
2378 ret = []
2379 pkgs = (d.getVar("PACKAGES") or "").split()
2380 vars = (d.getVar(pkgvars) or "").split()
2381 for v in vars:
2382 ret.append(v)
2383    for p in pkgs:
2384        for v in vars:
2385            ret.append(v + ":" + p)
2386
2387        # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
2388        # affected recipes.
2389        ret.append('_exclude_incompatible-%s' % p)
2390 return " ".join(ret)
2391
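# Illustrative expansion (hypothetical values): with PACKAGES = "foo foo-dev"
# and PACKAGEVARS containing "FILES RDEPENDS", gen_packagevar() returns roughly
#   "FILES RDEPENDS FILES:foo RDEPENDS:foo _exclude_incompatible-foo
#    FILES:foo-dev RDEPENDS:foo-dev _exclude_incompatible-foo-dev"
# so do_package re-runs whenever any per-package value changes.
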
2392PACKAGE_PREPROCESS_FUNCS ?= ""
2393# Functions for setting up PKGD
2394PACKAGEBUILDPKGD ?= " \
2395 package_prepare_pkgdata \
2396 perform_packagecopy \
2397 ${PACKAGE_PREPROCESS_FUNCS} \
2398 split_and_strip_files \
2399 fixup_perms \
2400 "
2401# Functions which split PKGD up into separate packages
2402PACKAGESPLITFUNCS ?= " \
2403 package_do_split_locales \
2404 populate_packages"
2405# Functions which process metadata based on split packages
2406PACKAGEFUNCS += " \
2407 package_fixsymlinks \
2408 package_name_hook \
2409 package_do_filedeps \
2410 package_do_shlibs \
2411 package_do_pkgconfig \
2412 read_shlibdeps \
2413 package_depchains \
2414 emit_pkgdata"
2415
2416python do_package () {
2417 # Change the following version to cause sstate to invalidate the package
2418 # cache. This is useful if an item this class depends on changes in a
2419 # way that the output of this class changes. rpmdeps is a good example
2420 # as any change to rpmdeps requires this to be rerun.
2421 # PACKAGE_BBCLASS_VERSION = "4"
2422
2423 # Init cachedpath
2424 global cpath
2425 cpath = oe.cachedpath.CachedPath()
2426
2427 ###########################################################################
2428 # Sanity test the setup
2429 ###########################################################################
2430
2431 packages = (d.getVar('PACKAGES') or "").split()
2432    if not packages:
2433 bb.debug(1, "No packages to build, skipping do_package")
2434 return
2435
2436 workdir = d.getVar('WORKDIR')
2437 outdir = d.getVar('DEPLOY_DIR')
2438 dest = d.getVar('D')
2439 dvar = d.getVar('PKGD')
2440 pn = d.getVar('PN')
2441
2442 if not workdir or not outdir or not dest or not dvar or not pn:
2443 msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
2444 oe.qa.handle_error("var-undefined", msg, d)
2445 return
2446
2447 bb.build.exec_func("package_convert_pr_autoinc", d)
2448
2449 ###########################################################################
2450 # Optimisations
2451 ###########################################################################
2452
2453 # Continually expanding complex expressions is inefficient, particularly
2454 # when we write to the datastore and invalidate the expansion cache. This
2455 # code pre-expands some frequently used variables
2456
2457 def expandVar(x, d):
2458 d.setVar(x, d.getVar(x))
2459
2460 for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
2461 expandVar(x, d)
2462
2463 ###########################################################################
2464 # Setup PKGD (from D)
2465 ###########################################################################
2466
2467 for f in (d.getVar('PACKAGEBUILDPKGD') or '').split():
2468 bb.build.exec_func(f, d)
2469
2470 ###########################################################################
2471 # Split up PKGD into PKGDEST
2472 ###########################################################################
2473
2474 cpath = oe.cachedpath.CachedPath()
2475
2476 for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
2477 bb.build.exec_func(f, d)
2478
2479 ###########################################################################
2480 # Process PKGDEST
2481 ###########################################################################
2482
2483 # Build global list of files in each split package
2484 global pkgfiles
2485 pkgfiles = {}
2486 packages = d.getVar('PACKAGES').split()
2487 pkgdest = d.getVar('PKGDEST')
2488 for pkg in packages:
2489 pkgfiles[pkg] = []
2490 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
2491 for file in files:
2492 pkgfiles[pkg].append(walkroot + os.sep + file)
2493
2494 for f in (d.getVar('PACKAGEFUNCS') or '').split():
2495 bb.build.exec_func(f, d)
2496
2497 oe.qa.exit_if_errors(d)
2498}
2499
2500do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
2501do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
2502addtask package after do_install
2503
2504SSTATETASKS += "do_package"
2505do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
2506do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
2507do_package_setscene[dirs] = "${STAGING_DIR}"
2508
2509python do_package_setscene () {
2510 sstate_setscene(d)
2511}
2512addtask do_package_setscene
2513
2514# Copy from PKGDESTWORK to a temporary directory, as PKGDESTWORK can be cleaned
2515# by both do_package_setscene and do_packagedata_setscene, leading to races
2516python do_packagedata () {
2517 bb.build.exec_func("package_get_auto_pr", d)
2518
2519 src = d.expand("${PKGDESTWORK}")
2520 dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
2521 oe.path.copyhardlinktree(src, dest)
2522
2523 bb.build.exec_func("packagedata_translate_pr_autoinc", d)
2524}
2525do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
2526
2527# Translate the EXTENDPRAUTO and AUTOINC to the final values
2528packagedata_translate_pr_autoinc() {
2529 find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
2530 sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
2531 -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
2532}
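
# Equivalent Python sketch (hypothetical input values) of the sed invocation
# in packagedata_translate_pr_autoinc above:
def _translate_pr_autoinc_sketch(text, pv_autoinc="0", extendprauto="1"):
    text = text.replace("@PRSERV_PV_AUTOINC@", pv_autoinc)
    return text.replace("@EXTENDPRAUTO@", extendprauto)

# _translate_pr_autoinc_sketch("PKGV: 1.0+git@PRSERV_PV_AUTOINC@")
# -> 'PKGV: 1.0+git0'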
2533
2534addtask packagedata before do_build after do_package
2535
2536SSTATETASKS += "do_packagedata"
2537do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
2538do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
2539do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
2540
2541python do_packagedata_setscene () {
2542 sstate_setscene(d)
2543}
2544addtask do_packagedata_setscene
2545
2546#
2547# Helper functions for the package writing classes
2548#
2549
2550def mapping_rename_hook(d):
2551 """
2552 Rewrite variables to account for package renaming in things
2553 like debian.bbclass or manual PKG variable name changes
2554 """
2555 pkg = d.getVar("PKG")
2556 runtime_mapping_rename("RDEPENDS", pkg, d)
2557 runtime_mapping_rename("RRECOMMENDS", pkg, d)
2558 runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
deleted file mode 100644
index ec7e10dbc9..0000000000
--- a/meta/classes/package_deb.bbclass
+++ /dev/null
@@ -1,329 +0,0 @@
1#
2# Copyright 2006-2008 OpenedHand Ltd.
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "deb"
10
11DPKG_BUILDCMD ??= "dpkg-deb"
12
13DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
14DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
15
16PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
17
18APTCONF_TARGET = "${WORKDIR}"
19
20APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
21
22def debian_arch_map(arch, tune):
23 tune_features = tune.split()
24 if arch == "allarch":
25 return "all"
26 if arch in ["i586", "i686"]:
27 return "i386"
28 if arch == "x86_64":
29 if "mx32" in tune_features:
30 return "x32"
31 return "amd64"
32 if arch.startswith("mips"):
33 endian = ["el", ""]["bigendian" in tune_features]
34 if "n64" in tune_features:
35 return "mips64" + endian
36 if "n32" in tune_features:
37 return "mipsn32" + endian
38 return "mips" + endian
39 if arch == "powerpc":
40 return arch + ["", "spe"]["spe" in tune_features]
41 if arch == "aarch64":
42 return "arm64"
43 if arch == "arm":
44 return arch + ["el", "hf"]["callconvention-hard" in tune_features]
45 return arch
46
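# A few illustrative mappings from debian_arch_map() above (the tune feature
# strings are hypothetical examples):
#   debian_arch_map("x86_64", "")                 -> "amd64"
#   debian_arch_map("x86_64", "mx32")             -> "x32"
#   debian_arch_map("arm", "callconvention-hard") -> "armhf"
#   debian_arch_map("mips", "bigendian n64")      -> "mips64"
#   debian_arch_map("aarch64", "")                -> "arm64"
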
47python do_package_deb () {
48 packages = d.getVar('PACKAGES')
49 if not packages:
50 bb.debug(1, "PACKAGES not defined, nothing to package")
51 return
52
53 tmpdir = d.getVar('TMPDIR')
54 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
55 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
56
57 oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
58}
59do_package_deb[vardeps] += "deb_write_pkg"
60do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
61
62def deb_write_pkg(pkg, d):
63 import re, copy
64 import textwrap
65 import subprocess
66 import collections
67 import codecs
68
69 outdir = d.getVar('PKGWRITEDIRDEB')
70 pkgdest = d.getVar('PKGDEST')
71
72 def cleanupcontrol(root):
73 for p in ['CONTROL', 'DEBIAN']:
74 p = os.path.join(root, p)
75 if os.path.exists(p):
76 bb.utils.prunedir(p)
77
78 localdata = bb.data.createCopy(d)
79 root = "%s/%s" % (pkgdest, pkg)
80
81 lf = bb.utils.lockfile(root + ".lock")
82 try:
83
84 localdata.setVar('ROOT', '')
85 localdata.setVar('ROOT_%s' % pkg, root)
86 pkgname = localdata.getVar('PKG:%s' % pkg)
87 if not pkgname:
88 pkgname = pkg
89 localdata.setVar('PKG', pkgname)
90
91 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
92
93        basedir = os.path.dirname(root)
94
95 pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
96 bb.utils.mkdirhier(pkgoutdir)
97
98 os.chdir(root)
99 cleanupcontrol(root)
100 from glob import glob
101 g = glob('*')
102 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
103 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
104 return
105
106 controldir = os.path.join(root, 'DEBIAN')
107 bb.utils.mkdirhier(controldir)
108 os.chmod(controldir, 0o755)
109
110 ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
111
112 fields = []
113 pe = d.getVar('PKGE')
114 if pe and int(pe) > 0:
115 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
116 else:
117 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
118 fields.append(["Description: %s\n", ['DESCRIPTION']])
119 fields.append(["Section: %s\n", ['SECTION']])
120 fields.append(["Priority: %s\n", ['PRIORITY']])
121 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
122 fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
123 fields.append(["OE: %s\n", ['PN']])
124 fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
125 if d.getVar('HOMEPAGE'):
126 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
127
128 # Package, Version, Maintainer, Description - mandatory
129 # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
130
131
132 def pullData(l, d):
133 l2 = []
134 for i in l:
135 data = d.getVar(i)
136 if data is None:
137 raise KeyError(i)
138 if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
139 data = 'all'
140 elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
141                    # The fields in a deb control file don't allow the `_'
142                    # character, so change the arch's `_' to `-', e.g.
143                    # `x86_64' --> `x86-64'.
144 data = data.replace('_', '-')
145 l2.append(data)
146 return l2
147
148 ctrlfile.write("Package: %s\n" % pkgname)
149 if d.getVar('PACKAGE_ARCH') == "all":
150 ctrlfile.write("Multi-Arch: foreign\n")
151 # check for required fields
152 for (c, fs) in fields:
153 # Special behavior for description...
154 if 'DESCRIPTION' in fs:
155 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
156 ctrlfile.write('Description: %s\n' % summary)
157 description = localdata.getVar('DESCRIPTION') or "."
158 description = textwrap.dedent(description).strip()
159 if '\\n' in description:
160 # Manually indent
161 for t in description.split('\\n'):
162 ctrlfile.write(' %s\n' % (t.strip() or '.'))
163 else:
164 # Auto indent
165 ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
166
167 else:
168 ctrlfile.write(c % tuple(pullData(fs, localdata)))
169
170 # more fields
171
172 custom_fields_chunk = get_package_additional_metadata("deb", localdata)
173 if custom_fields_chunk:
174 ctrlfile.write(custom_fields_chunk)
175 ctrlfile.write("\n")
176
177 mapping_rename_hook(localdata)
178
179 def debian_cmp_remap(var):
180 # dpkg does not allow for '(', ')' or ':' in a dependency name
181 # Replace any instances of them with '__'
182 #
183            # In debian, '>' and '<' do not mean what they appear to mean:
184            #   '<' = less or equal
185            #   '>' = greater or equal
186            # so adjust these to the '<<' and '>>' equivalents.
187 # Also, "=" specifiers only work if they have the PR in, so 1.2.3 != 1.2.3-r0
188 # so to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
189 for dep in list(var.keys()):
190 if '(' in dep or '/' in dep:
191 newdep = re.sub(r'[(:)/]', '__', dep)
192 if newdep.startswith("__"):
193 newdep = "A" + newdep
194 if newdep != dep:
195 var[newdep] = var[dep]
196 del var[dep]
197 for dep in var:
198 for i, v in enumerate(var[dep]):
199 if (v or "").startswith("< "):
200 var[dep][i] = var[dep][i].replace("< ", "<< ")
201 elif (v or "").startswith("> "):
202 var[dep][i] = var[dep][i].replace("> ", ">> ")
203 elif (v or "").startswith("= ") and "-r" not in v:
204 ver = var[dep][i].replace("= ", "")
205 var[dep][i] = var[dep][i].replace("= ", ">= ")
206 var[dep].append("<< " + ver + ".0")
207
208 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
209 debian_cmp_remap(rdepends)
210 for dep in list(rdepends.keys()):
211 if dep == pkg:
212 del rdepends[dep]
213 continue
214 if '*' in dep:
215 del rdepends[dep]
216 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
217 debian_cmp_remap(rrecommends)
218 for dep in list(rrecommends.keys()):
219 if '*' in dep:
220 del rrecommends[dep]
221 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
222 debian_cmp_remap(rsuggests)
223 # Deliberately drop version information here, not wanted/supported by deb
224 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
225 # Remove file paths if any from rprovides, debian does not support custom providers
226 for key in list(rprovides.keys()):
227 if key.startswith('/'):
228 del rprovides[key]
229 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
230 debian_cmp_remap(rprovides)
231 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
232 debian_cmp_remap(rreplaces)
233 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
234 debian_cmp_remap(rconflicts)
235 if rdepends:
236 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
237 if rsuggests:
238 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
239 if rrecommends:
240 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
241 if rprovides:
242 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
243 if rreplaces:
244 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
245 if rconflicts:
246 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
247 ctrlfile.close()
248
249 for script in ["preinst", "postinst", "prerm", "postrm"]:
250 scriptvar = localdata.getVar('pkg_%s' % script)
251 if not scriptvar:
252 continue
253 scriptvar = scriptvar.strip()
254 scriptfile = open(os.path.join(controldir, script), 'w')
255
256 if scriptvar.startswith("#!"):
257 pos = scriptvar.find("\n") + 1
258 scriptfile.write(scriptvar[:pos])
259 else:
260 pos = 0
261 scriptfile.write("#!/bin/sh\n")
262
263 # Prevent the prerm/postrm scripts from being run during an upgrade
264 if script in ('prerm', 'postrm'):
265 scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
266
267 scriptfile.write(scriptvar[pos:])
268 scriptfile.write('\n')
269 scriptfile.close()
270 os.chmod(os.path.join(controldir, script), 0o755)
271
272 conffiles_str = ' '.join(get_conffiles(pkg, d))
273 if conffiles_str:
274 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
275 for f in conffiles_str.split():
276 if os.path.exists(oe.path.join(root, f)):
277 conffiles.write('%s\n' % f)
278 conffiles.close()
279
280 os.chdir(basedir)
281 subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
282 root, pkgoutdir),
283 stderr=subprocess.STDOUT,
284 shell=True)
285
286 finally:
287 cleanupcontrol(root)
288 bb.utils.unlockfile(lf)
289
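# Minimal, self-contained sketch of the version-operator remapping performed
# by debian_cmp_remap() inside deb_write_pkg() above (the renaming of '(',
# ':' and '/' in dependency names is omitted here):
def _cmp_remap_sketch(var):
    for dep in var:
        for i, v in enumerate(var[dep]):
            if (v or "").startswith("< "):
                var[dep][i] = v.replace("< ", "<< ")
            elif (v or "").startswith("> "):
                var[dep][i] = v.replace("> ", ">> ")
            elif (v or "").startswith("= ") and "-r" not in v:
                ver = v.replace("= ", "")
                var[dep][i] = ">= " + ver
                var[dep].append("<< " + ver + ".0")
    return var

# _cmp_remap_sketch({"libfoo": ["> 1.2"], "bar": ["= 1.2.3"]})
# -> {'libfoo': ['>> 1.2'], 'bar': ['>= 1.2.3', '<< 1.2.3.0']}
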
290# Otherwise allarch packages may change depending on override configuration
291deb_write_pkg[vardepsexclude] = "OVERRIDES"
292
293# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
294DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
295do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"
296
297SSTATETASKS += "do_package_write_deb"
298do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
299do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
300
301python do_package_write_deb_setscene () {
302 tmpdir = d.getVar('TMPDIR')
303
304 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
305 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
306
307 sstate_setscene(d)
308}
309addtask do_package_write_deb_setscene
310
311python () {
312 if d.getVar('PACKAGES') != '':
313 deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
314 d.appendVarFlag('do_package_write_deb', 'depends', deps)
315 d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
316}
317
318python do_package_write_deb () {
319 bb.build.exec_func("read_subpackage_metadata", d)
320 bb.build.exec_func("do_package_deb", d)
321}
322do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
323do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
324do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
325addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
326do_build[rdeptask] += "do_package_write_deb"
327
328PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
329PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
deleted file mode 100644
index c43592af7e..0000000000
--- a/meta/classes/package_ipk.bbclass
+++ /dev/null
@@ -1,292 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "ipk"
10
11IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
12IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
13IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"
14
15PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
16
17# Program to be used to build opkg packages
18OPKGBUILDCMD ??= 'opkg-build -Z xz -a "${XZ_DEFAULTS}"'
19
20OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
21OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
22OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
23
24OPKGLIBDIR ??= "${localstatedir}/lib"
25
26python do_package_ipk () {
27 workdir = d.getVar('WORKDIR')
28 outdir = d.getVar('PKGWRITEDIRIPK')
29 tmpdir = d.getVar('TMPDIR')
30 pkgdest = d.getVar('PKGDEST')
31 if not workdir or not outdir or not tmpdir:
32 bb.error("Variables incorrectly set, unable to package")
33 return
34
35 packages = d.getVar('PACKAGES')
36    if not packages:
37 bb.debug(1, "No packages; nothing to do")
38 return
39
40 # We're about to add new packages so the index needs to be checked
41 # so remove the appropriate stamp file.
42 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
43 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
44
45 oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
46}
47do_package_ipk[vardeps] += "ipk_write_pkg"
48do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
49
50def ipk_write_pkg(pkg, d):
51 import re, copy
52 import subprocess
53 import textwrap
54 import collections
55 import glob
56
57 def cleanupcontrol(root):
58 for p in ['CONTROL', 'DEBIAN']:
59 p = os.path.join(root, p)
60 if os.path.exists(p):
61 bb.utils.prunedir(p)
62
63 outdir = d.getVar('PKGWRITEDIRIPK')
64 pkgdest = d.getVar('PKGDEST')
65 recipesource = os.path.basename(d.getVar('FILE'))
66
67 localdata = bb.data.createCopy(d)
68 root = "%s/%s" % (pkgdest, pkg)
69
70 lf = bb.utils.lockfile(root + ".lock")
71 try:
72 localdata.setVar('ROOT', '')
73 localdata.setVar('ROOT_%s' % pkg, root)
74 pkgname = localdata.getVar('PKG:%s' % pkg)
75 if not pkgname:
76 pkgname = pkg
77 localdata.setVar('PKG', pkgname)
78
79 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
80
81        basedir = os.path.dirname(root)
82 arch = localdata.getVar('PACKAGE_ARCH')
83
84 if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
85 # Spread packages across subdirectories so each isn't too crowded
86 if pkgname.startswith('lib'):
87 pkg_prefix = 'lib' + pkgname[3]
88 else:
89 pkg_prefix = pkgname[0]
90
91 # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
92 # together. These package suffixes are taken from the definitions of
93 # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
94 if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
95 pkg_subdir = pkgname[:-4]
96 elif pkgname.endswith('-staticdev'):
97 pkg_subdir = pkgname[:-10]
98 elif pkgname.endswith('-locale'):
99 pkg_subdir = pkgname[:-7]
100 elif '-locale-' in pkgname:
101 pkg_subdir = pkgname[:pkgname.find('-locale-')]
102 else:
103 pkg_subdir = pkgname
104
105 pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
106 else:
107 pkgoutdir = "%s/%s" % (outdir, arch)
108
109 bb.utils.mkdirhier(pkgoutdir)
110 os.chdir(root)
111 cleanupcontrol(root)
112 g = glob.glob('*')
113 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
114 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
115 return
116
117 controldir = os.path.join(root, 'CONTROL')
118 bb.utils.mkdirhier(controldir)
119 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
120
121 fields = []
122 pe = d.getVar('PKGE')
123 if pe and int(pe) > 0:
124 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
125 else:
126 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
127 fields.append(["Description: %s\n", ['DESCRIPTION']])
128 fields.append(["Section: %s\n", ['SECTION']])
129 fields.append(["Priority: %s\n", ['PRIORITY']])
130 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
131 fields.append(["License: %s\n", ['LICENSE']])
132 fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
133 fields.append(["OE: %s\n", ['PN']])
134 if d.getVar('HOMEPAGE'):
135 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
136
137 def pullData(l, d):
138 l2 = []
139 for i in l:
140 l2.append(d.getVar(i))
141 return l2
142
143 ctrlfile.write("Package: %s\n" % pkgname)
144 # check for required fields
145 for (c, fs) in fields:
146 for f in fs:
147 if localdata.getVar(f, False) is None:
148 raise KeyError(f)
149 # Special behavior for description...
150 if 'DESCRIPTION' in fs:
151 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
152 ctrlfile.write('Description: %s\n' % summary)
153 description = localdata.getVar('DESCRIPTION') or "."
154 description = textwrap.dedent(description).strip()
155 if '\\n' in description:
156 # Manually indent: multiline description includes a leading space
157 for t in description.split('\\n'):
158 ctrlfile.write(' %s\n' % (t.strip() or ' .'))
159 else:
160 # Auto indent
161 ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
162 else:
163 ctrlfile.write(c % tuple(pullData(fs, localdata)))
164
165 custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
166 if custom_fields_chunk is not None:
167 ctrlfile.write(custom_fields_chunk)
168 ctrlfile.write("\n")
169
170 mapping_rename_hook(localdata)
171
172 def debian_cmp_remap(var):
173            # In debian, '>' and '<' do not mean what they appear to mean:
174            #   '<' = less or equal
175            #   '>' = greater or equal
176            # so adjust these to the '<<' and '>>' equivalents.
177 # Also, "=" specifiers only work if they have the PR in, so 1.2.3 != 1.2.3-r0
178 # so to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
179 for dep in var:
180 for i, v in enumerate(var[dep]):
181 if (v or "").startswith("< "):
182 var[dep][i] = var[dep][i].replace("< ", "<< ")
183 elif (v or "").startswith("> "):
184 var[dep][i] = var[dep][i].replace("> ", ">> ")
185 elif (v or "").startswith("= ") and "-r" not in v:
186 ver = var[dep][i].replace("= ", "")
187 var[dep][i] = var[dep][i].replace("= ", ">= ")
188 var[dep].append("<< " + ver + ".0")
189
190 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
191 debian_cmp_remap(rdepends)
192 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
193 debian_cmp_remap(rrecommends)
194 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
195 debian_cmp_remap(rsuggests)
196 # Deliberately drop version information here, not wanted/supported by ipk
197 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
198 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
199 debian_cmp_remap(rprovides)
200 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
201 debian_cmp_remap(rreplaces)
202 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
203 debian_cmp_remap(rconflicts)
204
205 if rdepends:
206 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
207 if rsuggests:
208 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
209 if rrecommends:
210 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
211 if rprovides:
212 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
213 if rreplaces:
214 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
215 if rconflicts:
216 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
217 ctrlfile.write("Source: %s\n" % recipesource)
218 ctrlfile.close()
219
220 for script in ["preinst", "postinst", "prerm", "postrm"]:
221 scriptvar = localdata.getVar('pkg_%s' % script)
222 if not scriptvar:
223 continue
224 scriptfile = open(os.path.join(controldir, script), 'w')
225 scriptfile.write(scriptvar)
226 scriptfile.close()
227 os.chmod(os.path.join(controldir, script), 0o755)
228
229 conffiles_str = ' '.join(get_conffiles(pkg, d))
230 if conffiles_str:
231 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
232 for f in conffiles_str.split():
233 if os.path.exists(oe.path.join(root, f)):
234 conffiles.write('%s\n' % f)
235 conffiles.close()
236
237 os.chdir(basedir)
238 subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
239 d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
240 stderr=subprocess.STDOUT,
241 shell=True)
242
243 if d.getVar('IPK_SIGN_PACKAGES') == '1':
244 ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
245 ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
246 sign_ipk(d, ipk_to_sign)
247
248 finally:
249 cleanupcontrol(root)
250 bb.utils.unlockfile(lf)
251
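# A small, self-contained sketch (hypothetical helper, not part of the class)
# of the IPK_HIERARCHICAL_FEED layout computed above: packages are spread
# across <prefix>/<subdir> directories, keeping -dev/-dbg/... variants next
# to their base package.
def _ipk_feed_subdir(pkgname):
    prefix = 'lib' + pkgname[3] if pkgname.startswith('lib') else pkgname[0]
    if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
        subdir = pkgname[:-4]
    elif pkgname.endswith('-staticdev'):
        subdir = pkgname[:-10]
    elif pkgname.endswith('-locale'):
        subdir = pkgname[:-7]
    elif '-locale-' in pkgname:
        subdir = pkgname[:pkgname.find('-locale-')]
    else:
        subdir = pkgname
    return "%s/%s" % (prefix, subdir)

# _ipk_feed_subdir("libglib-2.0-dev") -> "libg/libglib-2.0"
# _ipk_feed_subdir("busybox-dbg")     -> "b/busybox"
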
252# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
253IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
254ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"
255
256# Otherwise allarch packages may change depending on override configuration
257ipk_write_pkg[vardepsexclude] = "OVERRIDES"
258
259
260SSTATETASKS += "do_package_write_ipk"
261do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
262do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
263
264python do_package_write_ipk_setscene () {
265 tmpdir = d.getVar('TMPDIR')
266
267 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
268 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
269
270 sstate_setscene(d)
271}
272addtask do_package_write_ipk_setscene
273
274python () {
275 if d.getVar('PACKAGES') != '':
276 deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot xz-native:do_populate_sysroot'
277 d.appendVarFlag('do_package_write_ipk', 'depends', deps)
278 d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
279}
280
281python do_package_write_ipk () {
282 bb.build.exec_func("read_subpackage_metadata", d)
283 bb.build.exec_func("do_package_ipk", d)
284}
285do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
286do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
287do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
288addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
289do_build[rdeptask] += "do_package_write_ipk"
290
291PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
292PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
deleted file mode 100644
index f653bd9240..0000000000
--- a/meta/classes/package_pkgdata.bbclass
+++ /dev/null
@@ -1,173 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
8
9def package_populate_pkgdata_dir(pkgdatadir, d):
10 import glob
11
12 postinsts = []
13 seendirs = set()
14 stagingdir = d.getVar("PKGDATA_DIR")
15 pkgarchs = ['${MACHINE_ARCH}']
16 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
17 pkgarchs.append('allarch')
18
19 bb.utils.mkdirhier(pkgdatadir)
20 for pkgarch in pkgarchs:
21 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
22 with open(manifest, "r") as f:
23 for l in f:
24 l = l.strip()
25 dest = l.replace(stagingdir, "")
26 if l.endswith("/"):
27 staging_copydir(l, pkgdatadir, dest, seendirs)
28 continue
29 try:
30 staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
31 except FileExistsError:
32 continue
33
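# Illustrative scan order (hypothetical values): with MACHINE_ARCH =
# "qemux86_64" and PACKAGE_EXTRA_ARCHS = "core2-64 x86_64", manifests are
# visited as qemux86_64, x86_64, core2-64, allarch; since already-copied
# files raise FileExistsError and are skipped, the most machine-specific
# packagedata appears to win on collisions.
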
34python package_prepare_pkgdata() {
35 import copy
36 import glob
37
38 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
39 mytaskname = d.getVar("BB_RUNTASK")
40 if mytaskname.endswith("_setscene"):
41 mytaskname = mytaskname.replace("_setscene", "")
42 workdir = d.getVar("WORKDIR")
43 pn = d.getVar("PN")
44 stagingdir = d.getVar("PKGDATA_DIR")
45 pkgdatadir = d.getVar("WORKDIR_PKGDATA")
46
47 # Detect bitbake -b usage
48 nodeps = d.getVar("BB_LIMITEDDEPS") or False
49 if nodeps:
50        package_populate_pkgdata_dir(pkgdatadir, d)
51 return
52
53 start = None
54 configuredeps = []
55 for dep in taskdepdata:
56 data = taskdepdata[dep]
57 if data[1] == mytaskname and data[0] == pn:
58 start = dep
59 break
60 if start is None:
61 bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
62
63 # We need to figure out which sysroot files we need to expose to this task.
64 # This needs to match what would get restored from sstate, which is controlled
65 # ultimately by calls from bitbake to setscene_depvalid().
66 # That function expects a setscene dependency tree. We build a dependency tree
67 # condensed to inter-sstate task dependencies, similar to that used by setscene
68 # tasks. We can then call into setscene_depvalid() and decide
69 # which dependencies we can "see" and should expose in the recipe specific sysroot.
70 setscenedeps = copy.deepcopy(taskdepdata)
71
72 start = set([start])
73
74 sstatetasks = d.getVar("SSTATETASKS").split()
75 # Add recipe specific tasks referenced by setscene_depvalid()
76 sstatetasks.append("do_stash_locale")
77
78 # If start is an sstate task (like do_package) we need to add in its direct dependencies
79 # else the code below won't recurse into them.
80 for dep in set(start):
81 for dep2 in setscenedeps[dep][3]:
82 start.add(dep2)
83 start.remove(dep)
84
85 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
86 for dep in taskdepdata:
87 data = setscenedeps[dep]
88 if data[1] not in sstatetasks:
89 for dep2 in setscenedeps:
90 data2 = setscenedeps[dep2]
91 if dep in data2[3]:
92 data2[3].update(setscenedeps[dep][3])
93 data2[3].remove(dep)
94 if dep in start:
95 start.update(setscenedeps[dep][3])
96 start.remove(dep)
97 del setscenedeps[dep]
98
99 # Remove circular references
100 for dep in setscenedeps:
101 if dep in setscenedeps[dep][3]:
102 setscenedeps[dep][3].remove(dep)
103
104 # Direct dependencies should be present and can be depended upon
105 for dep in set(start):
106 if setscenedeps[dep][1] == "do_packagedata":
107 if dep not in configuredeps:
108 configuredeps.append(dep)
109
110 msgbuf = []
111 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
112 # for ones that would be restored from sstate.
113 done = list(start)
114 next = list(start)
115 while next:
116 new = []
117 for dep in next:
118 data = setscenedeps[dep]
119 for datadep in data[3]:
120 if datadep in done:
121 continue
122 taskdeps = {}
123 taskdeps[dep] = setscenedeps[dep][:2]
124 taskdeps[datadep] = setscenedeps[datadep][:2]
125 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
126 done.append(datadep)
127 new.append(datadep)
128 if retval:
129 msgbuf.append("Skipping setscene dependency %s" % datadep)
130 continue
131 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
132 configuredeps.append(datadep)
133 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
134 else:
135 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
136 next = new
137
138 # This logging is too verbose for day to day use sadly
139 #bb.debug(2, "\n".join(msgbuf))
140
141 seendirs = set()
142 postinsts = []
143 multilibs = {}
144 manifests = {}
145
146 msg_adding = []
147
148 for dep in configuredeps:
149 c = setscenedeps[dep][0]
150 msg_adding.append(c)
151
152 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
153 destsysroot = pkgdatadir
154
155 if manifest:
156 targetdir = destsysroot
157 with open(manifest, "r") as f:
158 manifests[dep] = manifest
159 for l in f:
160 l = l.strip()
161 dest = targetdir + l.replace(stagingdir, "")
162 if l.endswith("/"):
163 staging_copydir(l, targetdir, dest, seendirs)
164 continue
165 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
166
167 bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
168
169}
170package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
171package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
172
173
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
deleted file mode 100644
index 63c1b077a3..0000000000
--- a/meta/classes/package_rpm.bbclass
+++ /dev/null
@@ -1,761 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "rpm"
10
11RPM="rpm"
12RPMBUILD="rpmbuild"
13
14PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
15
16# Maintaining the perfile dependencies has significant overhead when writing the
17# packages. When set to "1", this value merges them for efficiency.
18MERGEPERFILEDEPS = "1"
19
20# Filter dependencies based on a provided function.
21def filter_deps(var, f):
22 import collections
23
24 depends_dict = bb.utils.explode_dep_versions2(var)
25 newdeps_dict = collections.OrderedDict()
26 for dep in depends_dict:
27 if f(dep):
28 newdeps_dict[dep] = depends_dict[dep]
29 return bb.utils.join_deps(newdeps_dict, commasep=False)
30
31# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
32# dependencies for nativesdk packages.
33def filter_nativesdk_deps(srcname, var):
34 if var and srcname.startswith("nativesdk-"):
35 var = filter_deps(var, lambda dep: not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl('))
36 return var
37
38# Construct per file dependencies file
39def write_rpm_perfiledata(srcname, d):
40 workdir = d.getVar('WORKDIR')
41 packages = d.getVar('PACKAGES')
42 pkgd = d.getVar('PKGD')
43
44 def dump_filerdeps(varname, outfile, d):
45 outfile.write("#!/usr/bin/env python3\n\n")
46 outfile.write("# Dependency table\n")
47 outfile.write('deps = {\n')
48 for pkg in packages.split():
49 dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
50 dependsflist = (d.getVar(dependsflist_key) or "")
51 for dfile in dependsflist.split():
52 key = "FILE" + varname + ":" + dfile + ":" + pkg
53 deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
54 depends_dict = bb.utils.explode_dep_versions(deps)
55 file = dfile.replace("@underscore@", "_")
56 file = file.replace("@closebrace@", "]")
57 file = file.replace("@openbrace@", "[")
58 file = file.replace("@tab@", "\t")
59 file = file.replace("@space@", " ")
60 file = file.replace("@at@", "@")
61 outfile.write('"' + pkgd + file + '" : "')
62 for dep in depends_dict:
63 ver = depends_dict[dep]
64 if dep and ver:
65 ver = ver.replace("(","")
66 ver = ver.replace(")","")
67 outfile.write(dep + " " + ver + " ")
68 else:
69 outfile.write(dep + " ")
70 outfile.write('",\n')
71 outfile.write('}\n\n')
72 outfile.write("import sys\n")
73 outfile.write("while 1:\n")
74 outfile.write("\tline = sys.stdin.readline().strip()\n")
75 outfile.write("\tif not line:\n")
76 outfile.write("\t\tsys.exit(0)\n")
77 outfile.write("\tif line in deps:\n")
78 outfile.write("\t\tprint(deps[line] + '\\n')\n")
79
80 # OE-core dependencies a.k.a. RPM requires
81 outdepends = workdir + "/" + srcname + ".requires"
82
83 dependsfile = open(outdepends, 'w')
84
85 dump_filerdeps('RDEPENDS', dependsfile, d)
86
87 dependsfile.close()
88 os.chmod(outdepends, 0o755)
89
90 # OE-core / RPM Provides
91 outprovides = workdir + "/" + srcname + ".provides"
92
93 providesfile = open(outprovides, 'w')
94
95 dump_filerdeps('RPROVIDES', providesfile, d)
96
97 providesfile.close()
98 os.chmod(outprovides, 0o755)
99
100 return (outdepends, outprovides)
101
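
# Hedged sketch of the escape scheme decoded above: per-file dependency keys
# mangle awkward filename characters, and the generated helper script maps
# them back (note that @at@ is decoded last):
def _demangle_filename(dfile):
    for marker, char in (("@underscore@", "_"), ("@closebrace@", "]"),
                         ("@openbrace@", "["), ("@tab@", "\t"),
                         ("@space@", " "), ("@at@", "@")):
        dfile = dfile.replace(marker, char)
    return dfile

# _demangle_filename("/usr/bin/foo@space@bar") -> '/usr/bin/foo bar'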
102
103python write_specfile () {
104 import oe.packagedata
105
106 # append information for logs and patches to %prep
107 def add_prep(d,spec_files_bottom):
108 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
109            spec_files_bottom.append('%%prep -n %s' % d.getVar('PN'))
110            spec_files_bottom.append('echo "include logs and patches, please check them in SOURCES"')
111 spec_files_bottom.append('')
112
113    # append the name of the tarball to the 'SOURCE' keyword in the .spec file.
114 def tail_source(d):
115 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
116 ar_outdir = d.getVar('ARCHIVER_OUTDIR')
117 if not os.path.exists(ar_outdir):
118 return
119 source_list = os.listdir(ar_outdir)
120 source_number = 0
121 for source in source_list:
122 # do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
123 # exist in ARCHIVER_OUTDIR so skip if present.
124 if source.endswith(".src.rpm"):
125 continue
126                # rpmbuild doesn't need root permissions, but it does need to
127                # know each file's user and group name; the only user and
128                # group available when working in fakeroot is "root".
129 f = os.path.join(ar_outdir, source)
130 os.chown(f, 0, 0)
131 spec_preamble_top.append('Source%s: %s' % (source_number, source))
132 source_number += 1
133
134 # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
135 # This format is similar to OE, however there are restrictions on the
136 # characters that can be in a field. In the Version field, "-"
137 # characters are not allowed. "-" is allowed in the Release field.
138 #
139 # We translate the "-" in the version to a "+", by loading the PKGV
140 # from the dependent recipe, replacing the - with a +, and then using
141 # that value to do a replace inside of this recipe's dependencies.
142 # This preserves the "-" separator between the version and release, as
143 # well as any "-" characters inside of the release field.
144 #
145 # All of this has to happen BEFORE the mapping_rename_hook as
146 # after renaming we cannot look up the dependencies in the packagedata
147 # store.
148 def translate_vers(varname, d):
149 depends = d.getVar(varname)
150 if depends:
151 depends_dict = bb.utils.explode_dep_versions2(depends)
152 newdeps_dict = {}
153 for dep in depends_dict:
154 verlist = []
155 for ver in depends_dict[dep]:
156 if '-' in ver:
157 subd = oe.packagedata.read_subpkgdata_dict(dep, d)
158 if 'PKGV' in subd:
159 pv = subd['PV']
160 pkgv = subd['PKGV']
161 reppv = pkgv.replace('-', '+')
162 ver = ver.replace(pv, reppv).replace(pkgv, reppv)
163 if 'PKGR' in subd:
164                            # Make sure ver uses PKGR rather than PR
165 pr = '-' + subd['PR']
166 pkgr = '-' + subd['PKGR']
167 if pkgr not in ver:
168 ver = ver.replace(pr, pkgr)
169 verlist.append(ver)
170 else:
171 verlist.append(ver)
172 newdeps_dict[dep] = verlist
173 depends = bb.utils.join_deps(newdeps_dict)
174 d.setVar(varname, depends.strip())
175
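    # Worked example (illustrative values): if a dependent recipe has
    # PV/PKGV = "1.0-rc1" and PKGR = "r3", a dependency "foo (= 1.0-rc1-r3)"
    # is rewritten to "foo (= 1.0+rc1-r3)", leaving exactly one "-" as the
    # Version-Release separator.
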
176    # We need to change the style of the dependency from BB to RPM.
177    # This needs to happen AFTER the mapping_rename_hook.
178 def print_deps(variable, tag, array, d):
179 depends = variable
180 if depends:
181 depends_dict = bb.utils.explode_dep_versions2(depends)
182 for dep in depends_dict:
183 for ver in depends_dict[dep]:
184 ver = ver.replace('(', '')
185 ver = ver.replace(')', '')
186 array.append("%s: %s %s" % (tag, dep, ver))
187 if not len(depends_dict[dep]):
188 array.append("%s: %s" % (tag, dep))
189
190 def walk_files(walkpath, target, conffiles, dirfiles):
191 # We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
192 # when packaging. We just ignore these files which are created in
193 # packages-split/ and not package/
194        # We have the odd situation where the CONTROL/DEBIAN directory can be removed
195        # in the middle of the walk; the isdir() test would then fail and the walk code
196        # would assume it's a file, hence we check for the names in files too.
197 for rootpath, dirs, files in os.walk(walkpath):
198 path = rootpath.replace(walkpath, "")
199 if path.endswith("DEBIAN") or path.endswith("CONTROL"):
200 continue
201 path = path.replace("%", "%%%%%%%%")
202 path = path.replace("[", "?")
203 path = path.replace("]", "?")
204
205 # Treat all symlinks to directories as normal files.
206 # os.walk() lists them as directories.
207 def move_to_files(dir):
208 if os.path.islink(os.path.join(rootpath, dir)):
209 files.append(dir)
210 return True
211 else:
212 return False
213 dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
214
215 # Directory handling can happen in two ways, either DIRFILES is not set at all
216 # in which case we fall back to the older behaviour of packages owning all their
217 # directories
218 if dirfiles is None:
219 for dir in dirs:
220 if dir == "CONTROL" or dir == "DEBIAN":
221 continue
222 dir = dir.replace("%", "%%%%%%%%")
223 dir = dir.replace("[", "?")
224 dir = dir.replace("]", "?")
225 # All packages own the directories their files are in...
226 target.append('%dir "' + path + '/' + dir + '"')
227 else:
228                # Packages own only empty directories or explicitly listed
229                # directories. This prevents overlapping security permissions.
230 if path and not files and not dirs:
231 target.append('%dir "' + path + '"')
232 elif path and path in dirfiles:
233 target.append('%dir "' + path + '"')
234
235 for file in files:
236 if file == "CONTROL" or file == "DEBIAN":
237 continue
238 file = file.replace("%", "%%%%%%%%")
239 file = file.replace("[", "?")
240 file = file.replace("]", "?")
241 if conffiles.count(path + '/' + file):
242 target.append('%config "' + path + '/' + file + '"')
243 else:
244 target.append('"' + path + '/' + file + '"')
245
246 # Prevent the prerm/postrm scripts from being run during an upgrade
247 def wrap_uninstall(scriptvar):
248 scr = scriptvar.strip()
249 if scr.startswith("#!"):
250 pos = scr.find("\n") + 1
251 else:
252 pos = 0
253 scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
254 return scr
255
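    # e.g. (illustrative) wrapping a prerm body "echo bye" yields:
    #   if [ "$1" = "0" ] ; then
    #   echo bye
    #   fi
    # so the scriptlet runs only on removal ($1 = 0), not during an upgrade.
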
256 def get_perfile(varname, pkg, d):
257 deps = []
258 dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
259 dependsflist = (d.getVar(dependsflist_key) or "")
260 for dfile in dependsflist.split():
261 key = "FILE" + varname + ":" + dfile + ":" + pkg
262 depends = d.getVar(key)
263 if depends:
264 deps.append(depends)
265 return " ".join(deps)
266
267 def append_description(spec_preamble, text):
268 """
269 Add the description to the spec file.
270 """
271 import textwrap
272 dedent_text = textwrap.dedent(text).strip()
273 # Bitbake saves "\n" as "\\n"
274 if '\\n' in dedent_text:
275 for t in dedent_text.split('\\n'):
276 spec_preamble.append(t.strip())
277 else:
278 spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
279
280 packages = d.getVar('PACKAGES')
281    if not packages:
282 bb.debug(1, "No packages; nothing to do")
283 return
284
285 pkgdest = d.getVar('PKGDEST')
286 if not pkgdest:
287 bb.fatal("No PKGDEST")
288
289 outspecfile = d.getVar('OUTSPECFILE')
290 if not outspecfile:
291 bb.fatal("No OUTSPECFILE")
292
293 # Construct the SPEC file...
294 srcname = d.getVar('PN')
295 localdata = bb.data.createCopy(d)
296 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
297 srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
298 srcversion = localdata.getVar('PKGV').replace('-', '+')
299 srcrelease = localdata.getVar('PKGR')
300 srcepoch = (localdata.getVar('PKGE') or "")
301 srclicense = localdata.getVar('LICENSE')
302 srcsection = localdata.getVar('SECTION')
303 srcmaintainer = localdata.getVar('MAINTAINER')
304 srchomepage = localdata.getVar('HOMEPAGE')
305 srcdescription = localdata.getVar('DESCRIPTION') or "."
306 srccustomtagschunk = get_package_additional_metadata("rpm", localdata)
307
308 srcdepends = d.getVar('DEPENDS')
309 srcrdepends = ""
310 srcrrecommends = ""
311 srcrsuggests = ""
312 srcrprovides = ""
313 srcrreplaces = ""
314 srcrconflicts = ""
315 srcrobsoletes = ""
316
317 srcrpreinst = []
318 srcrpostinst = []
319 srcrprerm = []
320 srcrpostrm = []
321
322 spec_preamble_top = []
323 spec_preamble_bottom = []
324
325 spec_scriptlets_top = []
326 spec_scriptlets_bottom = []
327
328 spec_files_top = []
329 spec_files_bottom = []
330
331 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
332 extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
333
334 for pkg in packages.split():
335 localdata = bb.data.createCopy(d)
336
337 root = "%s/%s" % (pkgdest, pkg)
338
339 localdata.setVar('ROOT', '')
340 localdata.setVar('ROOT_%s' % pkg, root)
341 pkgname = localdata.getVar('PKG:%s' % pkg)
342 if not pkgname:
343 pkgname = pkg
344 localdata.setVar('PKG', pkgname)
345
346 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
347
348 conffiles = get_conffiles(pkg, d)
349 dirfiles = localdata.getVar('DIRFILES')
350 if dirfiles is not None:
351 dirfiles = dirfiles.split()
352
353 splitname = pkgname
354
355 splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
356 splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
357 splitrelease = (localdata.getVar('PKGR') or "")
358 splitepoch = (localdata.getVar('PKGE') or "")
359 splitlicense = (localdata.getVar('LICENSE') or "")
360 splitsection = (localdata.getVar('SECTION') or "")
361 splitdescription = (localdata.getVar('DESCRIPTION') or ".")
362 splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
363
364 translate_vers('RDEPENDS', localdata)
365 translate_vers('RRECOMMENDS', localdata)
366 translate_vers('RSUGGESTS', localdata)
367 translate_vers('RPROVIDES', localdata)
368 translate_vers('RREPLACES', localdata)
369 translate_vers('RCONFLICTS', localdata)
370
371 # Map the dependencies into their final form
372 mapping_rename_hook(localdata)
373
374 splitrdepends = localdata.getVar('RDEPENDS') or ""
375 splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
376 splitrsuggests = localdata.getVar('RSUGGESTS') or ""
377 splitrprovides = localdata.getVar('RPROVIDES') or ""
378 splitrreplaces = localdata.getVar('RREPLACES') or ""
379 splitrconflicts = localdata.getVar('RCONFLICTS') or ""
380 splitrobsoletes = ""
381
382 splitrpreinst = localdata.getVar('pkg_preinst')
383 splitrpostinst = localdata.getVar('pkg_postinst')
384 splitrprerm = localdata.getVar('pkg_prerm')
385 splitrpostrm = localdata.getVar('pkg_postrm')
386
387
388 if not perfiledeps:
389 # Add in summary of per file dependencies
390 splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
391 splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
392
393 splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
394
395 # Gather special src/first package data
396 if srcname == splitname:
397 archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
398 bb.data.inherits_class('archiver', d)
399 if archiving and srclicense != splitlicense:
400 bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
401
402 srclicense = splitlicense
403 srcrdepends = splitrdepends
404 srcrrecommends = splitrrecommends
405 srcrsuggests = splitrsuggests
406 srcrprovides = splitrprovides
407 srcrreplaces = splitrreplaces
408 srcrconflicts = splitrconflicts
409
410 srcrpreinst = splitrpreinst
411 srcrpostinst = splitrpostinst
412 srcrprerm = splitrprerm
413 srcrpostrm = splitrpostrm
414
415 file_list = []
416 walk_files(root, file_list, conffiles, dirfiles)
417 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
418 bb.note("Not creating empty RPM package for %s" % splitname)
419 else:
420 spec_files_top.append('%files')
421 if extra_pkgdata:
422 package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
423 spec_files_top.append('%defattr(-,-,-,-)')
424 if file_list:
425 bb.note("Creating RPM package for %s" % splitname)
426 spec_files_top.extend(file_list)
427 else:
428 bb.note("Creating empty RPM package for %s" % splitname)
429 spec_files_top.append('')
430 continue
431
432 # Process subpackage data
433 spec_preamble_bottom.append('%%package -n %s' % splitname)
434 spec_preamble_bottom.append('Summary: %s' % splitsummary)
435 if srcversion != splitversion:
436 spec_preamble_bottom.append('Version: %s' % splitversion)
437 if srcrelease != splitrelease:
438 spec_preamble_bottom.append('Release: %s' % splitrelease)
439 if srcepoch != splitepoch:
440 spec_preamble_bottom.append('Epoch: %s' % splitepoch)
441 spec_preamble_bottom.append('License: %s' % splitlicense)
442 spec_preamble_bottom.append('Group: %s' % splitsection)
443
444 if srccustomtagschunk != splitcustomtagschunk:
445 spec_preamble_bottom.append(splitcustomtagschunk)
446
447 # Replaces == Obsoletes && Provides
448 robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
449 rprovides = bb.utils.explode_dep_versions2(splitrprovides)
450 rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
451        for dep in rreplaces:
452            if dep not in robsoletes:
453                robsoletes[dep] = rreplaces[dep]
454            if dep not in rprovides:
455                rprovides[dep] = rreplaces[dep]
456 splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
457 splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
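        # Illustrative example (hypothetical values): an entry such as
        #   RREPLACES:${PN} = "bar (<= 1.0)"
        # is folded into both dictionaries above, so the subpackage preamble
        # gains matching "Obsoletes:" and "Provides:" entries for bar, which
        # is how RPM expresses that one package replaces another.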
458
459 print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
460 if splitrpreinst:
461 print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
462 if splitrpostinst:
463 print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
464 if splitrprerm:
465 print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
466 if splitrpostrm:
467 print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
468
469 print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
470 print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
471 print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
472 print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
473 print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
474
475 spec_preamble_bottom.append('')
476
477 spec_preamble_bottom.append('%%description -n %s' % splitname)
478 append_description(spec_preamble_bottom, splitdescription)
479
480 spec_preamble_bottom.append('')
481
482 # Now process scriptlets
483 if splitrpreinst:
484 spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
485 spec_scriptlets_bottom.append('# %s - preinst' % splitname)
486 spec_scriptlets_bottom.append(splitrpreinst)
487 spec_scriptlets_bottom.append('')
488 if splitrpostinst:
489 spec_scriptlets_bottom.append('%%post -n %s' % splitname)
490 spec_scriptlets_bottom.append('# %s - postinst' % splitname)
491 spec_scriptlets_bottom.append(splitrpostinst)
492 spec_scriptlets_bottom.append('')
493 if splitrprerm:
494 spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
495 spec_scriptlets_bottom.append('# %s - prerm' % splitname)
496 scriptvar = wrap_uninstall(splitrprerm)
497 spec_scriptlets_bottom.append(scriptvar)
498 spec_scriptlets_bottom.append('')
499 if splitrpostrm:
500 spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
501 spec_scriptlets_bottom.append('# %s - postrm' % splitname)
502 scriptvar = wrap_uninstall(splitrpostrm)
503 spec_scriptlets_bottom.append(scriptvar)
504 spec_scriptlets_bottom.append('')
505
506 # Now process files
507 file_list = []
508 walk_files(root, file_list, conffiles, dirfiles)
509 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
510 bb.note("Not creating empty RPM package for %s" % splitname)
511 else:
512 spec_files_bottom.append('%%files -n %s' % splitname)
513 if extra_pkgdata:
514 package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
515 spec_files_bottom.append('%defattr(-,-,-,-)')
516 if file_list:
517 bb.note("Creating RPM package for %s" % splitname)
518 spec_files_bottom.extend(file_list)
519 else:
520 bb.note("Creating empty RPM package for %s" % splitname)
521 spec_files_bottom.append('')
522
523 del localdata
524
525    add_prep(d, spec_files_bottom)
526 spec_preamble_top.append('Summary: %s' % srcsummary)
527 spec_preamble_top.append('Name: %s' % srcname)
528 spec_preamble_top.append('Version: %s' % srcversion)
529 spec_preamble_top.append('Release: %s' % srcrelease)
530 if srcepoch and srcepoch.strip() != "":
531 spec_preamble_top.append('Epoch: %s' % srcepoch)
532 spec_preamble_top.append('License: %s' % srclicense)
533 spec_preamble_top.append('Group: %s' % srcsection)
534 spec_preamble_top.append('Packager: %s' % srcmaintainer)
535 if srchomepage:
536 spec_preamble_top.append('URL: %s' % srchomepage)
537 if srccustomtagschunk:
538 spec_preamble_top.append(srccustomtagschunk)
539 tail_source(d)
540
541 # Replaces == Obsoletes && Provides
542 robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
543 rprovides = bb.utils.explode_dep_versions2(srcrprovides)
544 rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
545    for dep in rreplaces:
546        if dep not in robsoletes:
547            robsoletes[dep] = rreplaces[dep]
548        if dep not in rprovides:
549            rprovides[dep] = rreplaces[dep]
550 srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
551 srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
552
553 print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
554 print_deps(srcrdepends, "Requires", spec_preamble_top, d)
555 if srcrpreinst:
556 print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
557 if srcrpostinst:
558 print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
559 if srcrprerm:
560 print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
561 if srcrpostrm:
562 print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
563
564 print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
565 print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
566 print_deps(srcrprovides, "Provides", spec_preamble_top, d)
567 print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
568 print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
569
570 spec_preamble_top.append('')
571
572 spec_preamble_top.append('%description')
573 append_description(spec_preamble_top, srcdescription)
574
575 spec_preamble_top.append('')
576
577 if srcrpreinst:
578 spec_scriptlets_top.append('%pre')
579 spec_scriptlets_top.append('# %s - preinst' % srcname)
580 spec_scriptlets_top.append(srcrpreinst)
581 spec_scriptlets_top.append('')
582 if srcrpostinst:
583 spec_scriptlets_top.append('%post')
584 spec_scriptlets_top.append('# %s - postinst' % srcname)
585 spec_scriptlets_top.append(srcrpostinst)
586 spec_scriptlets_top.append('')
587 if srcrprerm:
588 spec_scriptlets_top.append('%preun')
589 spec_scriptlets_top.append('# %s - prerm' % srcname)
590 scriptvar = wrap_uninstall(srcrprerm)
591 spec_scriptlets_top.append(scriptvar)
592 spec_scriptlets_top.append('')
593 if srcrpostrm:
594 spec_scriptlets_top.append('%postun')
595 spec_scriptlets_top.append('# %s - postrm' % srcname)
596 scriptvar = wrap_uninstall(srcrpostrm)
597 spec_scriptlets_top.append(scriptvar)
598 spec_scriptlets_top.append('')
599
600 # Write the SPEC file
601 specfile = open(outspecfile, 'w')
602
603 # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
604 # of the generated spec file
605 external_preamble = d.getVar("RPMSPEC_PREAMBLE")
606 if external_preamble:
607 specfile.write(external_preamble + "\n")
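    # Illustrative use (hypothetical value): a recipe or local.conf could set
    #   RPMSPEC_PREAMBLE = "AutoReqProv: no"
    # and that text would be written verbatim ahead of the generated preamble.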
608
609 for line in spec_preamble_top:
610 specfile.write(line + "\n")
611
612 for line in spec_preamble_bottom:
613 specfile.write(line + "\n")
614
615 for line in spec_scriptlets_top:
616 specfile.write(line + "\n")
617
618 for line in spec_scriptlets_bottom:
619 specfile.write(line + "\n")
620
621 for line in spec_files_top:
622 specfile.write(line + "\n")
623
624 for line in spec_files_bottom:
625 specfile.write(line + "\n")
626
627 specfile.close()
628}
629# Otherwise allarch packages may change depending on override configuration
630write_specfile[vardepsexclude] = "OVERRIDES"
631
632# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
633RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
634write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
635
636python do_package_rpm () {
637 workdir = d.getVar('WORKDIR')
638 tmpdir = d.getVar('TMPDIR')
639 pkgd = d.getVar('PKGD')
640 pkgdest = d.getVar('PKGDEST')
641 if not workdir or not pkgd or not tmpdir:
642 bb.error("Variables incorrectly set, unable to package")
643 return
644
645 packages = d.getVar('PACKAGES')
646 if not packages or packages == '':
647 bb.debug(1, "No packages; nothing to do")
648 return
649
650 # Construct the spec file...
651    # If the spec file already exists and has not been stored in
652    # pseudo's files.db, it may cause rpmbuild of the src.rpm to fail,
653    # so remove it before running rpmbuild for the src.rpm.
654 srcname = d.getVar('PN')
655 outspecfile = workdir + "/" + srcname + ".spec"
656 if os.path.isfile(outspecfile):
657 os.remove(outspecfile)
658 d.setVar('OUTSPECFILE', outspecfile)
659 bb.build.exec_func('write_specfile', d)
660
661 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
662 if perfiledeps:
663 outdepends, outprovides = write_rpm_perfiledata(srcname, d)
664
665 # Setup the rpmbuild arguments...
666 rpmbuild = d.getVar('RPMBUILD')
667 targetsys = d.getVar('TARGET_SYS')
668 targetvendor = d.getVar('HOST_VENDOR')
669
670 # Too many places in dnf stack assume that arch-independent packages are "noarch".
671 # Let's not fight against this.
672 package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
673 if package_arch == "all":
674 package_arch = "noarch"
675
676 sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX') or "nativesdk").replace("-", "_")
677 d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
678 pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
679 d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
680 bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
681 pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
682 bb.utils.mkdirhier(pkgwritedir)
683 os.chmod(pkgwritedir, 0o755)
684
685 cmd = rpmbuild
686 cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
687 cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
688 cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
689 cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
690 cmd = cmd + " --define '_use_internal_dependency_generator 0'"
691 cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
692 cmd = cmd + " --define '_build_id_links none'"
693 cmd = cmd + " --define '_binary_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
694 cmd = cmd + " --define '_source_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
695 cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
696 cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
697 cmd = cmd + " --define '_buildhost reproducible'"
698 cmd = cmd + " --define '__font_provides %{nil}'"
699 if perfiledeps:
700 cmd = cmd + " --define '__find_requires " + outdepends + "'"
701 cmd = cmd + " --define '__find_provides " + outprovides + "'"
702 else:
703 cmd = cmd + " --define '__find_requires %{nil}'"
704 cmd = cmd + " --define '__find_provides %{nil}'"
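    # Informational sketch: with MERGEPERFILEDEPS at its default "0", rpmbuild
    # runs the per-file dependency scripts written by write_rpm_perfiledata()
    # above; with MERGEPERFILEDEPS = "1", external dependency generation is
    # disabled and only the merged Requires/Provides already present in the
    # spec file are used.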
705 cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
706 cmd = cmd + " --define 'debug_package %{nil}'"
707 cmd = cmd + " --define '_tmppath " + workdir + "'"
708 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
709 cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
710 cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
711 cmdsrpm = cmdsrpm + " -bs " + outspecfile
712 # Build the .src.rpm
713 d.setVar('SBUILDSPEC', cmdsrpm + "\n")
714 d.setVarFlag('SBUILDSPEC', 'func', '1')
715 bb.build.exec_func('SBUILDSPEC', d)
716 cmd = cmd + " -bb " + outspecfile
717
718 # rpm 4 creates various empty directories in _topdir, let's clean them up
719 cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)
720
721 # Build the rpm package!
722 d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
723 d.setVarFlag('BUILDSPEC', 'func', '1')
724 bb.build.exec_func('BUILDSPEC', d)
725
726 if d.getVar('RPM_SIGN_PACKAGES') == '1':
727 bb.build.exec_func("sign_rpm", d)
728}
729
730python () {
731 if d.getVar('PACKAGES') != '':
732 deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
733 d.appendVarFlag('do_package_write_rpm', 'depends', deps)
734 d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
735}
736
737SSTATETASKS += "do_package_write_rpm"
738do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
739do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
740# Take a shared lock, we can write multiple packages at the same time...
741# but we need to stop the rootfs/solver from running while we do...
742do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
743
744python do_package_write_rpm_setscene () {
745 sstate_setscene(d)
746}
747addtask do_package_write_rpm_setscene
748
749python do_package_write_rpm () {
750 bb.build.exec_func("read_subpackage_metadata", d)
751 bb.build.exec_func("do_package_rpm", d)
752}
753
754do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
755do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
756do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
757addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
758do_build[rdeptask] += "do_package_write_rpm"
759
760PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
761PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
deleted file mode 100644
index de995f9747..0000000000
--- a/meta/classes/package_tar.bbclass
+++ /dev/null
@@ -1,77 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "tar"
10
11python do_package_tar () {
12 import subprocess
13
14 oldcwd = os.getcwd()
15
16 workdir = d.getVar('WORKDIR')
17 if not workdir:
18 bb.error("WORKDIR not defined, unable to package")
19 return
20
21 outdir = d.getVar('DEPLOY_DIR_TAR')
22 if not outdir:
23 bb.error("DEPLOY_DIR_TAR not defined, unable to package")
24 return
25
26 dvar = d.getVar('D')
27 if not dvar:
28 bb.error("D not defined, unable to package")
29 return
30
31 packages = d.getVar('PACKAGES')
32 if not packages:
33 bb.debug(1, "PACKAGES not defined, nothing to package")
34 return
35
36 pkgdest = d.getVar('PKGDEST')
37
38 bb.utils.mkdirhier(outdir)
39 bb.utils.mkdirhier(dvar)
40
41 for pkg in packages.split():
42 localdata = bb.data.createCopy(d)
43 root = "%s/%s" % (pkgdest, pkg)
44
45 overrides = localdata.getVar('OVERRIDES', False)
46 localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
47
48 bb.utils.mkdirhier(root)
49 basedir = os.path.dirname(root)
50 tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
51 os.chdir(root)
52 dlist = os.listdir(root)
53 if not dlist:
54 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
55 continue
56 args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
57 ret = subprocess.call(args + [tarfn] + dlist)
58 if ret != 0:
59 bb.error("Creation of tar %s failed." % tarfn)
60
61 os.chdir(oldcwd)
62}
63
64python () {
65 if d.getVar('PACKAGES') != '':
66 deps = ' tar-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
67 d.appendVarFlag('do_package_write_tar', 'depends', deps)
68 d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
69}
70
71
72python do_package_write_tar () {
73 bb.build.exec_func("read_subpackage_metadata", d)
74 bb.build.exec_func("do_package_tar", d)
75}
76do_package_write_tar[dirs] = "${D}"
77addtask package_write_tar before do_build after do_packagedata do_package
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
deleted file mode 100644
index 9f72c01d77..0000000000
--- a/meta/classes/packagedata.bbclass
+++ /dev/null
@@ -1,40 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python read_subpackage_metadata () {
8 import oe.packagedata
9
10 vars = {
11 "PN" : d.getVar('PN'),
12 "PE" : d.getVar('PE'),
13 "PV" : d.getVar('PV'),
14 "PR" : d.getVar('PR'),
15 }
16
17 data = oe.packagedata.read_pkgdata(vars["PN"], d)
18
19 for key in data.keys():
20 d.setVar(key, data[key])
21
22 for pkg in d.getVar('PACKAGES').split():
23 sdata = oe.packagedata.read_subpkgdata(pkg, d)
24 for key in sdata.keys():
25 if key in vars:
26 if sdata[key] != vars[key]:
27 if key == "PN":
28 bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
29 bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
30 continue
31 #
32 # If we set unsuffixed variables here there is a chance they could clobber override versions
33 # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
34 # We therefore don't clobber for the unsuffixed variable versions
35 #
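            # Hypothetical example: a suffixed key such as "DESCRIPTION:foo-pkg"
            # is set directly below, while a bare "DESCRIPTION" is set with
            # parsing=True so it cannot clobber an existing override version.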
36 if key.endswith(":" + pkg):
37 d.setVar(key, sdata[key])
38 else:
39 d.setVar(key, sdata[key], parsing=True)
40}
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
deleted file mode 100644
index 6f17fc73b0..0000000000
--- a/meta/classes/packagegroup.bbclass
+++ /dev/null
@@ -1,67 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class for packagegroup (package group) recipes
8
9# By default, only the packagegroup package itself is in PACKAGES.
10# -dbg and -dev flavours are handled by the anonfunc below.
11# This means that packagegroup recipes used to build multiple packagegroup
12# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
13PACKAGES = "${PN}"
14
15# By default, packagegroup packages are architecture-independent.
16# Only if dependencies are modified by MACHINE_FEATURES does PACKAGE_ARCH
17# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass.
18PACKAGE_ARCH ?= "all"
19
20# Fully expanded - so it applies the overrides as well
21PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
22
23LICENSE ?= "MIT"
24
25inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
26
27# This automatically adds -dbg and -dev flavours of all PACKAGES
28# to the list. Their dependencies (RRECOMMENDS) are handled as usual
29# by package_depchains in a following step.
30# Also mark all packages as ALLOW_EMPTY
31python () {
32 packages = d.getVar('PACKAGES').split()
33 if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY') != '1':
34 types = ['', '-dbg', '-dev']
35 if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
36 types.append('-ptest')
37 packages = [pkg + suffix for pkg in packages
38 for suffix in types]
39 d.setVar('PACKAGES', ' '.join(packages))
40 for pkg in packages:
41 d.setVar('ALLOW_EMPTY:%s' % pkg, '1')
42}
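# Illustrative expansion (hypothetical recipe): with PACKAGES = "packagegroup-foo"
# and ptest enabled in DISTRO_FEATURES, the anonymous function above rewrites
# PACKAGES to
#   "packagegroup-foo packagegroup-foo-dbg packagegroup-foo-dev packagegroup-foo-ptest"
# and sets ALLOW_EMPTY:<pkg> = "1" for each of them.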
43
44# We don't want to look at shared library dependencies for the
45# dbg packages
46DEPCHAIN_DBGDEFAULTDEPS = "1"
47
48# We only need the packaging tasks - disable the rest
49deltask do_fetch
50deltask do_unpack
51deltask do_patch
52deltask do_configure
53deltask do_compile
54deltask do_install
55deltask do_populate_sysroot
56
57INHIBIT_DEFAULT_DEPS = "1"
58
59python () {
60 if bb.data.inherits_class('nativesdk', d):
61 return
62 initman = d.getVar("VIRTUAL-RUNTIME_init_manager")
63 if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
64 bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
65}
66
67CVE_PRODUCT = ""
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
deleted file mode 100644
index e3157c7b18..0000000000
--- a/meta/classes/patch.bbclass
+++ /dev/null
@@ -1,171 +0,0 @@
1# Copyright (C) 2006 OpenedHand LTD
2#
3# SPDX-License-Identifier: MIT
4
5# Point to an empty file so any user's custom settings don't break things
6QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
7
8PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
9
10# There is a bug in patch 2.7.3 and earlier where index lines
11# in patches can change file modes when they shouldn't:
12# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
13# This leaks into debug sources in particular. Add the dependency
14# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
15PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"
16
17PATCH_GIT_USER_NAME ?= "OpenEmbedded"
18PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
19
20inherit terminal
21
22python () {
23 if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
24 extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
25 try:
26 extratasks.remove('do_unpack')
27 except ValueError:
28 # For some recipes do_unpack doesn't exist, ignore it
29 pass
30
31 d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
32 for task in extratasks:
33 d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
34}
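# Illustrative configuration (e.g. in local.conf): setting
#   PATCHTOOL = "git"
#   PATCH_COMMIT_FUNCTIONS = "1"
# activates the hooks wired up above, committing the source tree state after
# each task between do_unpack and do_patch.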
35
36python patch_task_patch_prefunc() {
37 # Prefunc for do_patch
38 srcsubdir = d.getVar('S')
39
40 workdir = os.path.abspath(d.getVar('WORKDIR'))
41 testsrcdir = os.path.abspath(srcsubdir)
42 if (testsrcdir + os.sep).startswith(workdir + os.sep):
43 # Double-check that either workdir or S or some directory in-between is a git repository
44 found = False
45 while testsrcdir != workdir:
46 if os.path.exists(os.path.join(testsrcdir, '.git')):
47 found = True
48 break
49 if testsrcdir == workdir:
50 break
51 testsrcdir = os.path.dirname(testsrcdir)
52 if not found:
53 bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')
54
55 patchdir = os.path.join(srcsubdir, 'patches')
56 if os.path.exists(patchdir):
57 if os.listdir(patchdir):
58 d.setVar('PATCH_HAS_PATCHES_DIR', '1')
59 else:
60 os.rmdir(patchdir)
61}
62
63python patch_task_postfunc() {
64    # Postfunc for task functions between do_unpack and do_patch
65 import oe.patch
66 import shutil
67 func = d.getVar('BB_RUNTASK')
68 srcsubdir = d.getVar('S')
69
70 if os.path.exists(srcsubdir):
71 if func == 'do_patch':
72 haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
73 patchdir = os.path.join(srcsubdir, 'patches')
74 if os.path.exists(patchdir):
75 shutil.rmtree(patchdir)
76 if haspatches:
77 stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
78 if stdout:
79 bb.process.run('git checkout patches', cwd=srcsubdir)
80 stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
81 if stdout:
82 useroptions = []
83 oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
84 bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(useroptions), func, oe.patch.GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
85}
86
87def src_patches(d, all=False, expand=True):
88 import oe.patch
89 return oe.patch.src_patches(d, all, expand)
90
91def should_apply(parm, d):
92 """Determine if we should apply the given patch"""
93 import oe.patch
94 return oe.patch.should_apply(parm, d)
95
96should_apply[vardepsexclude] = "DATE SRCDATE"
97
98python patch_do_patch() {
99 import oe.patch
100
101 patchsetmap = {
102 "patch": oe.patch.PatchTree,
103 "quilt": oe.patch.QuiltTree,
104 "git": oe.patch.GitApplyTree,
105 }
106
107 cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']
108
109 resolvermap = {
110 "noop": oe.patch.NOOPResolver,
111 "user": oe.patch.UserResolver,
112 }
113
114 rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']
115
116 classes = {}
117
118 s = d.getVar('S')
119
120 os.putenv('PATH', d.getVar('PATH'))
121
122 # We must use one TMPDIR per process so that the "patch" processes
123 # don't generate the same temp file name.
124
125 import tempfile
126 process_tmpdir = tempfile.mkdtemp()
127 os.environ['TMPDIR'] = process_tmpdir
128
129 for patch in src_patches(d):
130 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
131
132 if "patchdir" in parm:
133 patchdir = parm["patchdir"]
134 if not os.path.isabs(patchdir):
135 patchdir = os.path.join(s, patchdir)
136 if not os.path.isdir(patchdir):
137 bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
138 (patchdir, parm["patchdir"], parm['patchname']))
139 else:
140 patchdir = s
141
142        if patchdir not in classes:
143 patchset = cls(patchdir, d)
144 resolver = rcls(patchset, oe_terminal)
145 classes[patchdir] = (patchset, resolver)
146 patchset.Clean()
147 else:
148 patchset, resolver = classes[patchdir]
149
150 bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
151 try:
152 patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
153 except Exception as exc:
154 bb.utils.remove(process_tmpdir, True)
155 bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
156 try:
157 resolver.Resolve()
158 except bb.BBHandledException as e:
159 bb.utils.remove(process_tmpdir, True)
160 bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))
161
162 bb.utils.remove(process_tmpdir, True)
163 del os.environ['TMPDIR']
164}
165patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
166
167addtask patch after do_unpack
168do_patch[dirs] = "${WORKDIR}"
169do_patch[depends] = "${PATCHDEPENDENCY}"
170
171EXPORT_FUNCTIONS do_patch
diff --git a/meta/classes/perl-version.bbclass b/meta/classes/perl-version.bbclass
deleted file mode 100644
index 269ac9eb31..0000000000
--- a/meta/classes/perl-version.bbclass
+++ /dev/null
@@ -1,72 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PERL_OWN_DIR = ""
8
9# Determine the staged version of perl from the perl configuration file
10# Assign vardepvalue, because otherwise the signature changes before and
11# after perl is built (from None to the real version in config.sh).
12get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
13def get_perl_version(d):
14 import re
15 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
16 try:
17 f = open(cfg, 'r')
18 except IOError:
19 return None
20    l = f.readlines()
21    f.close()
22 r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
23 for s in l:
24 m = r.match(s)
25 if m:
26 return m.group(1)
27 return None
28
29PERLVERSION := "${@get_perl_version(d)}"
30PERLVERSION[vardepvalue] = ""
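# Illustrative only: a staged config.sh typically contains a line such as
#   version='5.36.0'
# (hypothetical version) from which get_perl_version() would return "5.36.0".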
31
32
33# Determine the staged arch of perl from the perl configuration file
34# Assign vardepvalue, because otherwise the signature changes before and
35# after perl is built (from None to the real value in config.sh).
36def get_perl_arch(d):
37 import re
38 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
39 try:
40 f = open(cfg, 'r')
41 except IOError:
42 return None
43    l = f.readlines()
44    f.close()
45 r = re.compile("^archname='([^']*)'")
46 for s in l:
47 m = r.match(s)
48 if m:
49 return m.group(1)
50 return None
51
52PERLARCH := "${@get_perl_arch(d)}"
53PERLARCH[vardepvalue] = ""
54
55# Determine the staged arch of perl-native from the perl configuration file
56# Assign vardepvalue, because otherwise the signature changes before and
57# after perl is built (from None to the real value in config.sh).
58def get_perl_hostarch(d):
59 import re
60 cfg = d.expand('${STAGING_LIBDIR_NATIVE}/perl5/config.sh')
61 try:
62 f = open(cfg, 'r')
63 except IOError:
64 return None
65    l = f.readlines()
66    f.close()
67 r = re.compile("^archname='([^']*)'")
68 for s in l:
69 m = r.match(s)
70 if m:
71 return m.group(1)
72 return None
diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass
deleted file mode 100644
index d56ec4ae72..0000000000
--- a/meta/classes/perlnative.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7EXTRANATIVEPATH += "perl-native"
8DEPENDS += "perl-native"
9OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
deleted file mode 100644
index 107e38885e..0000000000
--- a/meta/classes/pixbufcache.bbclass
+++ /dev/null
@@ -1,69 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class will generate the proper postinst/postrm scriptlets for pixbuf
9# packages.
10#
11
12DEPENDS:append:class-target = " qemu-native"
13inherit qemu
14
15PIXBUF_PACKAGES ??= "${PN}"
16
17PACKAGE_WRITE_DEPS += "qemu-native gdk-pixbuf-native"
18
19pixbufcache_common() {
20if [ "x$D" != "x" ]; then
21 $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} libdir=${libdir} \
22 bindir=${bindir} base_libdir=${base_libdir}
23else
24
25 # Update the pixbuf loaders in case they haven't been registered yet
26 ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
27
28 if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
29 for icondir in /usr/share/icons/*; do
30 if [ -d ${icondir} ]; then
31 gtk-update-icon-cache -t -q ${icondir}
32 fi
33 done
34 fi
35fi
36}
37
38python populate_packages:append() {
39 pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
40
41 for pkg in pixbuf_pkgs:
42 bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
43 postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
44 if not postinst:
45 postinst = '#!/bin/sh\n'
46 postinst += d.getVar('pixbufcache_common')
47 d.setVar('pkg_postinst:%s' % pkg, postinst)
48
49 postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
50 if not postrm:
51 postrm = '#!/bin/sh\n'
52 postrm += d.getVar('pixbufcache_common')
53 d.setVar('pkg_postrm:%s' % pkg, postrm)
54}
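# Illustrative override (hypothetical package name): a recipe shipping its
# loaders in a separate subpackage could set
#   PIXBUF_PACKAGES = "${PN}-loaders"
# so that only that package receives the postinst/postrm scriptlets above.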
55
56gdkpixbuf_complete() {
57GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
58}
59
60DEPENDS:append:class-native = " gdk-pixbuf-native"
61SYSROOT_PREPROCESS_FUNCS:append:class-native = " pixbufcache_sstate_postinst"
62
63pixbufcache_sstate_postinst() {
64 mkdir -p ${SYSROOT_DESTDIR}${bindir}
65 dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
66 echo '#!/bin/sh' > $dest
67 echo "${gdkpixbuf_complete}" >> $dest
68 chmod 0755 $dest
69}
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
deleted file mode 100644
index 1e1f3824dd..0000000000
--- a/meta/classes/pkgconfig.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS:prepend = "pkgconfig-native "
8
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
deleted file mode 100644
index caeef5d2b2..0000000000
--- a/meta/classes/populate_sdk.bbclass
+++ /dev/null
@@ -1,13 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The majority of populate_sdk is located in populate_sdk_base
8# This chunk simply facilitates compatibility with SDK only recipes.
9
10inherit populate_sdk_base
11
12addtask populate_sdk after do_install before do_build
13
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
deleted file mode 100644
index 0be108ad98..0000000000
--- a/meta/classes/populate_sdk_base.bbclass
+++ /dev/null
@@ -1,384 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PACKAGES = ""
8
9inherit image-postinst-intercepts image-artifact-names
10
11# Wildcards specifying complementary packages to install for every package that has been explicitly
12# installed into the rootfs
13COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
14COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
15COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
16COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
17COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
18COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
19COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
20
21def complementary_globs(featurevar, d):
22 all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
23 globs = []
24 features = set((d.getVar(featurevar) or '').split())
25 for name, glob in all_globs.items():
26 if name in features:
27 globs.append(glob)
28 return ' '.join(globs)
29
30SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
31SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
32SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
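# Illustrative only: with SDKIMAGE_FEATURES = "dev-pkgs dbg-pkgs",
# complementary_globs() would return "*-dev *-dbg", so every package
# explicitly installed into the SDK also pulls in its -dev and -dbg
# counterparts.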
33
34PACKAGE_ARCHS:append:task-populate-sdk = " sdk-provides-dummy-target"
35SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
36
37# List of locales to install, or "all" for all of them, or unset for none.
38SDKIMAGE_LINGUAS ?= "all"
39
40inherit rootfs_${IMAGE_PKGTYPE}
41
42SDK_DIR = "${WORKDIR}/sdk"
43SDK_OUTPUT = "${SDK_DIR}/image"
44SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
45
46SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
47
48B:task-populate-sdk = "${SDK_DIR}"
49
50SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
51
52SDK_TOOLCHAIN_LANGS ??= ""
53SDK_TOOLCHAIN_LANGS:remove:sdkmingw32 = "rust"
54# libstd-rs doesn't build for mips n32 with compiler constraint errors
55SDK_TOOLCHAIN_LANGS:remove:mipsarchn32 = "rust"
56
57TOOLCHAIN_HOST_TASK ?= " \
58 nativesdk-packagegroup-sdk-host \
59 packagegroup-cross-canadian-${MACHINE} \
60 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'go', 'packagegroup-go-cross-canadian-${MACHINE}', '', d)} \
61 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'rust', 'packagegroup-rust-cross-canadian-${MACHINE}', '', d)} \
62"
63TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
64TOOLCHAIN_TARGET_TASK ?= " \
65 ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
66 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'go', multilib_pkg_extend(d, 'packagegroup-go-sdk-target'), '', d)} \
67 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'rust', multilib_pkg_extend(d, 'libstd-rs'), '', d)} \
68 target-sdk-provides-dummy \
69"
70TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
71TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
72
73# Default suffix for the archived SDK
74SDK_ARCHIVE_TYPE ?= "tar.xz"
75SDK_XZ_COMPRESSION_LEVEL ?= "-9"
76SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
77
78# Support different SDK archive types according to SDK_ARCHIVE_TYPE; zip and tar.xz are currently supported
79python () {
80 if d.getVar('SDK_ARCHIVE_TYPE') == 'zip':
81 d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
82        # SDK_ARCHIVE_CMD generates the archived SDK ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from the input dir ${SDK_OUTPUT}/${SDKPATH} into the output dir ${SDKDEPLOYDIR}.
83        # It is recommended to cd into the input dir first so the archive does not contain the build path.
84 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
85 else:
86 d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
87 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
88}
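# Illustrative only: setting SDK_ARCHIVE_TYPE = "zip" (e.g. in local.conf)
# switches the archive dependency to zip-native and produces
# ${TOOLCHAIN_OUTPUTNAME}.zip instead of the default .tar.xz.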
89
90SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
91SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
92PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
93SDK_DEPENDS += "nativesdk-glibc-locale"
94
95# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
96# could be set to the MACHINE_ARCH
97REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
98
99PID = "${@os.getpid()}"
100
101EXCLUDE_FROM_WORLD = "1"
102
103SDK_PACKAGING_FUNC ?= "create_shar"
104SDK_PRE_INSTALL_COMMAND ?= ""
105SDK_POST_INSTALL_COMMAND ?= ""
106SDK_RELOCATE_AFTER_INSTALL ?= "1"
107
108SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk"
109SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
110
111SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
112SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
113SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
114SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
115
116SDK_PRUNE_SYSROOT_DIRS ?= "/dev"
117
118python write_target_sdk_manifest () {
119 from oe.sdk import sdk_list_installed_packages
120 from oe.utils import format_pkg_list
121 sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST"))
122 pkgs = sdk_list_installed_packages(d, True)
123 if not os.path.exists(sdkmanifestdir):
124 bb.utils.mkdirhier(sdkmanifestdir)
125 with open(d.getVar('SDK_TARGET_MANIFEST'), 'w') as output:
126 output.write(format_pkg_list(pkgs, 'ver'))
127}
128
129sdk_prune_dirs () {
130 for d in ${SDK_PRUNE_SYSROOT_DIRS}; do
131 rm -rf ${SDK_OUTPUT}${SDKTARGETSYSROOT}$d
132 done
133}
134
135python write_sdk_test_data() {
136 from oe.data import export2json
137 testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
138 bb.utils.mkdirhier(os.path.dirname(testdata))
139 export2json(d, testdata)
140}
141
142python write_host_sdk_manifest () {
143 from oe.sdk import sdk_list_installed_packages
144 from oe.utils import format_pkg_list
145 sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST"))
146 pkgs = sdk_list_installed_packages(d, False)
147 if not os.path.exists(sdkmanifestdir):
148 bb.utils.mkdirhier(sdkmanifestdir)
149 with open(d.getVar('SDK_HOST_MANIFEST'), 'w') as output:
150 output.write(format_pkg_list(pkgs, 'ver'))
151}
152
153POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data ; "
154POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest; sdk_prune_dirs; "
155POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest; "
156
157SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
158SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
159
160def populate_sdk_common(d):
161 from oe.sdk import populate_sdk
162 from oe.manifest import create_manifest, Manifest
163
164 # Handle package exclusions
165 excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split()
166 inst_pkgs = (d.getVar("PACKAGE_INSTALL") or "").split()
167 inst_attempt_pkgs = (d.getVar("PACKAGE_INSTALL_ATTEMPTONLY") or "").split()
168
169 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
170 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
171
172 for pkg in excl_pkgs:
173 if pkg in inst_pkgs:
174 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
175 inst_pkgs.remove(pkg)
176
177 if pkg in inst_attempt_pkgs:
178            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs))
179 inst_attempt_pkgs.remove(pkg)
180
181 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
182 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
183
184 pn = d.getVar('PN')
185 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
186 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
187
188 ld = bb.data.createCopy(d)
189 ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
190 runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
191 runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
192 d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK"))
193 d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY"))
194
195 # create target/host SDK manifests
196 create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
197 manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
198 create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
199 manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
200
201 populate_sdk(d)
202
203fakeroot python do_populate_sdk() {
204 populate_sdk_common(d)
205}
206SSTATETASKS += "do_populate_sdk"
207SSTATE_SKIP_CREATION:task-populate-sdk = '1'
208do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
209do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
210do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
211do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
212python do_populate_sdk_setscene () {
213 sstate_setscene(d)
214}
215addtask do_populate_sdk_setscene
216
217PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
218
219fakeroot create_sdk_files() {
220 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
221
222 # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
223 # Escape special characters like '+' and '.' in the SDKPATH
224 escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
225 sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
226
227 mkdir -p ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/
228 echo '${SDKPATHNATIVE}${libdir_nativesdk}
229${SDKPATHNATIVE}${base_libdir_nativesdk}
230include /etc/ld.so.conf' > ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ld.so.conf
231}
232
233python check_sdk_sysroots() {
234 # Fails build if there are broken or dangling symlinks in SDK sysroots
235
236 if d.getVar('CHECK_SDK_SYSROOTS') != '1':
237 # disabled, bail out
238 return
239
240 def norm_path(path):
241 return os.path.abspath(path)
242
243 # Get scan root
244 SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT'),
245 d.getVar('SDKPATH')))
246
247 bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
248
249 def check_symlink(linkPath):
250 if not os.path.islink(linkPath):
251 return
252
253 linkDirPath = os.path.dirname(linkPath)
254
255 targetPath = os.readlink(linkPath)
256 if not os.path.isabs(targetPath):
257 targetPath = os.path.join(linkDirPath, targetPath)
258 targetPath = norm_path(targetPath)
259
260 if SCAN_ROOT != os.path.commonprefix( [SCAN_ROOT, targetPath] ):
261 bb.error("Escaping symlink {0!s} --> {1!s}".format(linkPath, targetPath))
262 return
263
264 if not os.path.exists(targetPath):
265 bb.error("Broken symlink {0!s} --> {1!s}".format(linkPath, targetPath))
266 return
267
268 if os.path.isdir(targetPath):
269 dir_walk(targetPath)
270
271 def walk_error_handler(e):
272 bb.error(str(e))
273
274 def dir_walk(rootDir):
275 for dirPath,subDirEntries,fileEntries in os.walk(rootDir, followlinks=False, onerror=walk_error_handler):
276 entries = subDirEntries + fileEntries
277 for e in entries:
278 ePath = os.path.join(dirPath, e)
279 check_symlink(ePath)
280
281 # start
282 dir_walk(SCAN_ROOT)
283}
284
285SDKTAROPTS = "--owner=root --group=root"
286
287fakeroot archive_sdk() {
288 # Package it up
289 mkdir -p ${SDKDEPLOYDIR}
290 ${SDK_ARCHIVE_CMD}
291}
292
293TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
294TOOLCHAIN_SHAR_REL_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-relocate.sh"
295
296fakeroot create_shar() {
297 # copy in the template shar extractor script
298 cp ${TOOLCHAIN_SHAR_EXT_TMPL} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
299
300 rm -f ${T}/pre_install_command ${T}/post_install_command
301
302 if [ "${SDK_RELOCATE_AFTER_INSTALL}" = "1" ] ; then
303 cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
304 fi
305 cat << "EOF" >> ${T}/pre_install_command
306${SDK_PRE_INSTALL_COMMAND}
307EOF
308
309 cat << "EOF" >> ${T}/post_install_command
310${SDK_POST_INSTALL_COMMAND}
311EOF
312 sed -i -e '/@SDK_PRE_INSTALL_COMMAND@/r ${T}/pre_install_command' \
313 -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' \
314 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
315
316 # substitute variables
317 sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
318 -e 's#@SDKPATH@#${SDKPATH}#g' \
319 -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \
320 -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
321 -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
322 -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
323 -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\\&')}#g' \
324 -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
325 -e '/@SDK_PRE_INSTALL_COMMAND@/d' \
326 -e '/@SDK_POST_INSTALL_COMMAND@/d' \
327 -e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
328 -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
329 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
330
331 # add execution permission
332 chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
333
334 # append the SDK tarball
335 cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
336
337 # delete the old tarball, we don't need it anymore
338 rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}
339}
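# Informational: the result is a self-extracting installer; the shell header
# locates its own end and unpacks the appended ${SDK_ARCHIVE_TYPE} payload,
# so running the generated ${TOOLCHAIN_OUTPUTNAME}.sh installs the SDK and,
# with SDK_RELOCATE_AFTER_INSTALL = "1", relocates it to the chosen path.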
340
341populate_sdk_log_check() {
342 for target in $*
343 do
344 lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
345
346 echo "log_check: Using $lf_path as logfile"
347
348 if [ -e "$lf_path" ]; then
349 ${IMAGE_PKGTYPE}_log_check $target $lf_path
350 else
351 echo "Cannot find logfile [$lf_path]"
352 fi
353 echo "Logfile is clean"
354 done
355}
356
357def sdk_command_variables(d):
358 return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_PRE_TARGET_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS']
359
360def sdk_variables(d):
361 variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
362 'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
363 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'IMAGE_INSTALL_DEBUGFS']
364 variables.extend(sdk_command_variables(d))
365 return " ".join(variables)
366
367do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
368
369python () {
370 variables = sdk_command_variables(d)
371 for var in variables:
372 if d.getVar(var, False):
373 d.setVarFlag(var, 'func', '1')
374}
375
376do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
377 ${TOOLCHAIN_SHAR_EXT_TMPL}:True"
378
379do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
380do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
381do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
382do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
383do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
384addtask populate_sdk
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
deleted file mode 100644
index 56e24c4eed..0000000000
--- a/meta/classes/populate_sdk_ext.bbclass
+++ /dev/null
@@ -1,842 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Extensible SDK
8
9inherit populate_sdk_base
10
11# Used to override TOOLCHAIN_HOST_TASK in the eSDK case
12TOOLCHAIN_HOST_TASK_ESDK = " \
13 meta-environment-extsdk-${MACHINE} \
14 "
15
16SDK_RELOCATE_AFTER_INSTALL:task-populate-sdk-ext = "0"
17
18SDK_EXT = ""
19SDK_EXT:task-populate-sdk-ext = "-ext"
20
21# Options are full or minimal
22SDK_EXT_TYPE ?= "full"
23SDK_INCLUDE_PKGDATA ?= "0"
24SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
25SDK_INCLUDE_NATIVESDK ?= "0"
26SDK_INCLUDE_BUILDTOOLS ?= '1'
27
28SDK_RECRDEP_TASKS ?= ""
29SDK_CUSTOM_TEMPLATECONF ?= "0"
30
31ESDK_LOCALCONF_ALLOW ?= ""
32ESDK_LOCALCONF_REMOVE ?= "CONF_VERSION \
33 BB_NUMBER_THREADS \
34 BB_NUMBER_PARSE_THREADS \
35 PARALLEL_MAKE \
36 PRSERV_HOST \
37 SSTATE_MIRRORS \
38 DL_DIR \
39 SSTATE_DIR \
40 TMPDIR \
41 BB_SERVER_TIMEOUT \
42 "
43ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory icecc"
44SDK_UPDATE_URL ?= ""
45
46SDK_TARGETS ?= "${PN}"
47
48def get_sdk_install_targets(d, images_only=False):
49 sdk_install_targets = ''
50 if images_only or d.getVar('SDK_EXT_TYPE') != 'minimal':
51 sdk_install_targets = d.getVar('SDK_TARGETS')
52
53 depd = d.getVar('BB_TASKDEPDATA', False)
54 tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
55 tasklist.remove('do_build')
56 for v in depd.values():
57 if v[1] in tasklist:
58 if v[0] not in sdk_install_targets:
59 sdk_install_targets += ' {}'.format(v[0])
60
61 if not images_only:
62 if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
63 sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
64 if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1':
65 sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot'
66
67 return sdk_install_targets
68
69get_sdk_install_targets[vardepsexclude] = "BB_TASKDEPDATA"
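# Illustrative result (hypothetical image name): for an eSDK built from
# "core-image-foo" with SDK_INCLUDE_PKGDATA = "1" and SDK_INCLUDE_TOOLCHAIN
# = "1", the function above could return
#   "core-image-foo meta-world-pkgdata:do_allpackagedata meta-extsdk-toolchain:do_populate_sysroot"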
70
71OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
72
73# The files from COREBASE that you want preserved in the copy of COREBASE
74# placed inside the SDK. This allows someone's own setup scripts in
75# COREBASE to be preserved, as well as untracked files.
76COREBASE_FILES ?= " \
77 oe-init-build-env \
78 scripts \
79 LICENSE \
80 .templateconf \
81"
82
83SDK_DIR:task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
84B:task-populate-sdk-ext = "${SDK_DIR}"
85TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
86TOOLCHAIN_OUTPUTNAME:task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
87
88SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
89SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
90
91python write_target_sdk_ext_manifest () {
92 from oe.sdk import get_extra_sdkinfo
93 sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
94 extra_info = get_extra_sdkinfo(sstate_dir)
95
96 target = d.getVar('TARGET_SYS')
97 target_multimach = d.getVar('MULTIMACH_TARGET_SYS')
98 real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
99
100 pkgs = {}
101 os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True)
102 with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
103 for fn in extra_info['filesizes']:
104 info = fn.split(':')
105 if info[2] in (target, target_multimach, real_target_multimach) \
106 or info[5] == 'allarch':
107                if info[1] not in pkgs:
108 f.write("%s %s %s\n" % (info[1], info[2], info[3]))
109 pkgs[info[1]] = {}
110}
111python write_host_sdk_ext_manifest () {
112 from oe.sdk import get_extra_sdkinfo
113 sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
114 extra_info = get_extra_sdkinfo(sstate_dir)
115 host = d.getVar('BUILD_SYS')
116 with open(d.getVar('SDK_EXT_HOST_MANIFEST'), 'w') as f:
117 for fn in extra_info['filesizes']:
118 info = fn.split(':')
119 if info[2] == host:
120 f.write("%s %s %s\n" % (info[1], info[2], info[3]))
121}
122
123SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
124
125SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
126
127def clean_esdk_builddir(d, sdkbasepath):
128 """Clean up traces of the fake build for create_filtered_tasklist()"""
129 import shutil
130 cleanpaths = ['cache', 'tmp']
131 for pth in cleanpaths:
132 fullpth = os.path.join(sdkbasepath, pth)
133 if os.path.isdir(fullpth):
134 shutil.rmtree(fullpth)
135 elif os.path.isfile(fullpth):
136 os.remove(fullpth)
137
138def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
139 """
140 Create a filtered list of tasks. Also double-checks that the build system
141 within the SDK basically works and required sstate artifacts are available.
142 """
143 import tempfile
144 import shutil
145 import oe.copy_buildsystem
146
147 # Create a temporary build directory that we can pass to the env setup script
148 shutil.copyfile(sdkbasepath + '/conf/local.conf', sdkbasepath + '/conf/local.conf.bak')
149 try:
150 with open(sdkbasepath + '/conf/local.conf', 'a') as f:
151 # Force the use of sstate from the build system
152 f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
153 f.write('SSTATE_MIRRORS:forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
154 # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
155 f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n')
156 f.write('TCLIBCAPPEND:forcevariable = ""\n')
157 # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
158 # be different and we won't be able to find our native sstate)
159 if not bb.data.inherits_class('uninative', d):
160 f.write('INHERIT:remove = "uninative"\n')
161
162 # Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
163 # will not allow in its COREBASE path, so we need to rename the directory temporarily
164 temp_sdkbasepath = d.getVar('SDK_OUTPUT') + '/tmp-renamed-sdk'
165 # Delete any existing temp dir
166 try:
167 shutil.rmtree(temp_sdkbasepath)
168 except FileNotFoundError:
169 pass
170 bb.utils.rename(sdkbasepath, temp_sdkbasepath)
171 cmdprefix = '. %s .; ' % conf_initpath
172 logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
173 try:
174 oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
175 except bb.process.ExecutionError as e:
176 msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
177 if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
178 msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
179 bb.fatal(msg)
180 bb.utils.rename(temp_sdkbasepath, sdkbasepath)
181 # Clean out residue of running bitbake, which check_sstate_task_list()
182 # will effectively do
183 clean_esdk_builddir(d, sdkbasepath)
184 finally:
185 localconf = sdkbasepath + '/conf/local.conf'
186 if os.path.exists(localconf + '.bak'):
187 os.replace(localconf + '.bak', localconf)
188
189python copy_buildsystem () {
190 import re
191 import shutil
192 import glob
193 import oe.copy_buildsystem
194
195 oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT')
196
197 conf_bbpath = ''
198 conf_initpath = ''
199 core_meta_subdir = ''
200
201 # Copy in all metadata layers + bitbake (as repositories)
202 buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
203 baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
204
205    # Check if a custom templateconf path is set
206 use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
207
208 # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
209 derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
210 if derivative:
211 workspace_name = 'orig-workspace'
212 else:
213 workspace_name = None
214
215 corebase, sdkbblayers = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
216 conf_bbpath = os.path.join('layers', corebase, 'bitbake')
217
218 for path in os.listdir(baseoutpath + '/layers'):
219 relpath = os.path.join('layers', path, oe_init_env_script)
220 if os.path.exists(os.path.join(baseoutpath, relpath)):
221 conf_initpath = relpath
222
223 relpath = os.path.join('layers', path, 'scripts', 'devtool')
224 if os.path.exists(os.path.join(baseoutpath, relpath)):
225 scriptrelpath = os.path.dirname(relpath)
226
227 relpath = os.path.join('layers', path, 'meta')
228 if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
229 core_meta_subdir = relpath
230
231 d.setVar('oe_init_build_env_path', conf_initpath)
232 d.setVar('scriptrelpath', scriptrelpath)
233
234 # Write out config file for devtool
235 import configparser
236    config = configparser.ConfigParser()
237 config.add_section('General')
238 config.set('General', 'bitbake_subdir', conf_bbpath)
239 config.set('General', 'init_path', conf_initpath)
240 config.set('General', 'core_meta_subdir', core_meta_subdir)
241 config.add_section('SDK')
242 config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS'))
243 updateurl = d.getVar('SDK_UPDATE_URL')
244 if updateurl:
245 config.set('SDK', 'updateserver', updateurl)
246 bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
247 with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
248 config.write(f)
249
250 unlockedsigs = os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc')
251 with open(unlockedsigs, 'w') as f:
252 pass
253
254 # Create a layer for new recipes / appends
255 bbpath = d.getVar('BBPATH')
256 env = os.environ.copy()
257 env['PYTHONDONTWRITEBYTECODE'] = '1'
258 bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')], env=env)
259
260 # Create bblayers.conf
261 bb.utils.mkdirhier(baseoutpath + '/conf')
262 with open(baseoutpath + '/conf/bblayers.conf', 'w') as f:
263 f.write('# WARNING: this configuration has been automatically generated and in\n')
264 f.write('# most cases should not be edited. If you need more flexibility than\n')
265 f.write('# this configuration provides, it is strongly suggested that you set\n')
266 f.write('# up a proper instance of the full build system and use that instead.\n\n')
267
268        # LCONF_VERSION may not be set, for example when using meta-poky,
269 # so don't error if it isn't found
270 lconf_version = d.getVar('LCONF_VERSION', False)
271 if lconf_version is not None:
272 f.write('LCONF_VERSION = "%s"\n\n' % lconf_version)
273
274 f.write('BBPATH = "$' + '{TOPDIR}"\n')
275 f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
276 f.write('BBLAYERS := " \\\n')
277 for layerrelpath in sdkbblayers:
278 f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath)
279 f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
280 f.write(' "\n')
281
282 # Copy uninative tarball
283 # For now this is where uninative.bbclass expects the tarball
284 if bb.data.inherits_class('uninative', d):
285 uninative_file = d.expand('${UNINATIVE_DLDIR}/' + d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH")) + '/${UNINATIVE_TARBALL}')
286 uninative_checksum = bb.utils.sha256_file(uninative_file)
287 uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
288 bb.utils.mkdirhier(uninative_outdir)
289 shutil.copy(uninative_file, uninative_outdir)
290
291 env_passthrough = (d.getVar('BB_ENV_PASSTHROUGH_ADDITIONS') or '').split()
292 env_passthrough_values = {}
293
294 # Create local.conf
295 builddir = d.getVar('TOPDIR')
296 if derivative and os.path.exists(builddir + '/conf/site.conf'):
297 shutil.copyfile(builddir + '/conf/site.conf', baseoutpath + '/conf/site.conf')
298 if derivative and os.path.exists(builddir + '/conf/auto.conf'):
299 shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
300 if derivative:
301 shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
302 else:
303 local_conf_allowed = (d.getVar('ESDK_LOCALCONF_ALLOW') or '').split()
304 local_conf_remove = (d.getVar('ESDK_LOCALCONF_REMOVE') or '').split()
305 def handle_var(varname, origvalue, op, newlines):
306 if varname in local_conf_remove or (origvalue.strip().startswith('/') and not varname in local_conf_allowed):
307 newlines.append('# Removed original setting of %s\n' % varname)
308 return None, op, 0, True
309 else:
310 if varname in env_passthrough:
311 env_passthrough_values[varname] = origvalue
312 return origvalue, op, 0, True
313 varlist = ['[^#=+ ]*']
314 oldlines = []
315 if os.path.exists(builddir + '/conf/site.conf'):
316 with open(builddir + '/conf/site.conf', 'r') as f:
317 oldlines += f.readlines()
318 if os.path.exists(builddir + '/conf/auto.conf'):
319 with open(builddir + '/conf/auto.conf', 'r') as f:
320 oldlines += f.readlines()
321 if os.path.exists(builddir + '/conf/local.conf'):
322 with open(builddir + '/conf/local.conf', 'r') as f:
323 oldlines += f.readlines()
324 (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
325
326 with open(baseoutpath + '/conf/local.conf', 'w') as f:
327 f.write('# WARNING: this configuration has been automatically generated and in\n')
328 f.write('# most cases should not be edited. If you need more flexibility than\n')
329 f.write('# this configuration provides, it is strongly suggested that you set\n')
330 f.write('# up a proper instance of the full build system and use that instead.\n\n')
331 for line in newlines:
332 if line.strip() and not line.startswith('#'):
333 f.write(line)
334 # Write a newline just in case there's none at the end of the original
335 f.write('\n')
336
337 f.write('TMPDIR = "${TOPDIR}/tmp"\n')
338 f.write('TCLIBCAPPEND = ""\n')
339 f.write('DL_DIR = "${TOPDIR}/downloads"\n')
340
341 if bb.data.inherits_class('uninative', d):
342 f.write('INHERIT += "%s"\n' % 'uninative')
343 f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
344 f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
345
346 # Some classes are not suitable for SDK, remove them from INHERIT
347 f.write('INHERIT:remove = "%s"\n' % d.getVar('ESDK_CLASS_INHERIT_DISABLE', False))
348
349 # Bypass the default connectivity check if any
350 f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
351
352 # This warning will come out if reverse dependencies for a task
353 # don't have sstate as well as the task itself. We already know
354 # this will be the case for the extensible sdk, so turn off the
355 # warning.
356 f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n')
357
358 # Warn if the sigs in the locked-signature file don't match
359 # the sig computed from the metadata.
360 f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
361
362 # We want to be able to set this without a full reparse
363 f.write('BB_HASHCONFIG_IGNORE_VARS:append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
364
365 # Set up which tasks are ignored for run on install
366 f.write('BB_SETSCENE_ENFORCE_IGNORE_TASKS = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
367
368 # Hide the config information from bitbake output (since it's fixed within the SDK)
369 f.write('BUILDCFG_HEADER = ""\n\n')
370
371 # Write METADATA_REVISION
372 f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
373
374 f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
375 f.write('WITHIN_EXT_SDK = "1"\n\n')
376
377 # Map gcc-dependent uninative sstate cache for installer usage
378 f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
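        # (Illustrative note: SSTATE_MIRRORS entries are match/replacement
        # pairs, so a "universal" path falls back to universal-4.9 and that
        # in turn to universal-4.8.)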
379
380 if d.getVar("PRSERV_HOST"):
381            # Override this, we now include PR data, so it should only point to the local database
382 f.write('PRSERV_HOST = "localhost:0"\n\n')
383
384 # Allow additional config through sdk-extra.conf
385 fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
386 if fn:
387 with open(fn, 'r') as xf:
388 for line in xf:
389 f.write(line)
390
391 # If you define a sdk_extraconf() function then it can contain additional config
392 # (Though this is awkward; sdk-extra.conf should probably be used instead)
393 extraconf = (d.getVar('sdk_extraconf') or '').strip()
394 if extraconf:
395 # Strip off any leading / trailing spaces
396 for line in extraconf.splitlines():
397 f.write(line.strip() + '\n')
398
399 f.write('require conf/locked-sigs.inc\n')
400 f.write('require conf/unlocked-sigs.inc\n')
401
402 # Copy multiple configurations if they exist in the users config directory
403 if d.getVar('BBMULTICONFIG') is not None:
404 bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf', 'multiconfig'))
405 for mc in d.getVar('BBMULTICONFIG').split():
406 dest_stub = "/conf/multiconfig/%s.conf" % (mc,)
407 if os.path.exists(builddir + dest_stub):
408 shutil.copyfile(builddir + dest_stub, baseoutpath + dest_stub)
409
410 cachedir = os.path.join(baseoutpath, 'cache')
411 bb.utils.mkdirhier(cachedir)
412 bb.parse.siggen.copy_unitaskhashes(cachedir)
413
414 # If PR Service is in use, we need to export this as well
415 bb.note('Do we have a pr database?')
416 if d.getVar("PRSERV_HOST"):
417 bb.note('Writing PR database...')
418 # Based on the code in classes/prexport.bbclass
419 import oe.prservice
420        # Dump meta info of the tables
421 localdata = d.createCopy()
422 localdata.setVar('PRSERV_DUMPOPT_COL', "1")
423 localdata.setVar('PRSERV_DUMPDIR', os.path.join(baseoutpath, 'conf'))
424 localdata.setVar('PRSERV_DUMPFILE', '${PRSERV_DUMPDIR}/prserv.inc')
425
426        bb.note('Writing PR database to %s' % (localdata.getVar('PRSERV_DUMPFILE')))
427
428 retval = oe.prservice.prserv_dump_db(localdata)
429 if not retval:
430 bb.error("prexport_handler: export failed!")
431 return
432 (metainfo, datainfo) = retval
433 oe.prservice.prserv_export_tofile(localdata, metainfo, datainfo, True)
434
435    # Use the templateconf.cfg file from builddir if it exists
436 if os.path.exists(builddir + '/conf/templateconf.cfg') and use_custom_templateconf == '1':
437 shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg')
438 else:
439 # Write a templateconf.cfg
440 with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
441 f.write('meta/conf\n')
442
443 # Ensure any variables set from the external environment (by way of
444 # BB_ENV_PASSTHROUGH_ADDITIONS) are set in the SDK's configuration
445 extralines = []
446 for name, value in env_passthrough_values.items():
447 actualvalue = d.getVar(name) or ''
448 if value != actualvalue:
449 extralines.append('%s = "%s"\n' % (name, actualvalue))
450 if extralines:
451 with open(baseoutpath + '/conf/local.conf', 'a') as f:
452 f.write('\n')
453 f.write('# Extra settings from environment:\n')
454 for line in extralines:
455 f.write(line)
456 f.write('\n')
457
458 # Filter the locked signatures file to just the sstate tasks we are interested in
459 excluded_targets = get_sdk_install_targets(d, images_only=True)
460 sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
461 lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
462    # nativesdk-only sigfile to merge into locked-sigs.inc
463 sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
464 nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
465 nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
466
467 if sdk_include_nativesdk:
468 oe.copy_buildsystem.prune_lockedsigs([],
469 excluded_targets.split(),
470 nativesigfile,
471 True,
472 nativesigfile_pruned)
473
474 oe.copy_buildsystem.merge_lockedsigs([],
475 sigfile,
476 nativesigfile_pruned,
477 sigfile)
478
479 oe.copy_buildsystem.prune_lockedsigs([],
480 excluded_targets.split(),
481 sigfile,
482 False,
483 lockedsigs_pruned)
484
485 sstate_out = baseoutpath + '/sstate-cache'
486 bb.utils.remove(sstate_out, True)
487
488 # uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d)
489 fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d)
490
491 sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
492 sdk_ext_type = d.getVar('SDK_EXT_TYPE')
493 if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
494 # Create the filtered task list used to generate the sstate cache shipped with the SDK
495 tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
496 create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
497 else:
498 tasklistfn = None
499
500
501 cachedir = os.path.join(baseoutpath, 'cache')
502 bb.utils.mkdirhier(cachedir)
503 bb.parse.siggen.copy_unitaskhashes(cachedir)
504
505 # Add packagedata if enabled
506 if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
507 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
508 lockedsigs_copy = d.getVar('WORKDIR') + '/locked-sigs-copy.inc'
509 shutil.move(lockedsigs_pruned, lockedsigs_base)
510 oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
511 lockedsigs_base,
512 d.getVar('STAGING_DIR_HOST') + '/world-pkgdata/locked-sigs-pkgdata.inc',
513 lockedsigs_pruned,
514 lockedsigs_copy)
515
516 if sdk_include_toolchain:
517 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base2.inc'
518 lockedsigs_toolchain = d.expand("${STAGING_DIR}/${TUNE_PKGARCH}/meta-extsdk-toolchain/locked-sigs/locked-sigs-extsdk-toolchain.inc")
519 shutil.move(lockedsigs_pruned, lockedsigs_base)
520 oe.copy_buildsystem.merge_lockedsigs([],
521 lockedsigs_base,
522 lockedsigs_toolchain,
523 lockedsigs_pruned)
524 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain,
525 d.getVar('SSTATE_DIR'),
526 sstate_out, d,
527 fixedlsbstring,
528 filterfile=tasklistfn)
529
530 if sdk_ext_type == 'minimal':
531 if derivative:
532 # Assume the user is not going to set up an additional sstate
533 # mirror, thus we need to copy the additional artifacts (from
534 # workspace recipes) into the derivative SDK
535 lockedsigs_orig = d.getVar('TOPDIR') + '/conf/locked-sigs.inc'
536 if os.path.exists(lockedsigs_orig):
537 lockedsigs_extra = d.getVar('WORKDIR') + '/locked-sigs-extra.inc'
538 oe.copy_buildsystem.merge_lockedsigs(None,
539 lockedsigs_orig,
540 lockedsigs_pruned,
541 None,
542 lockedsigs_extra)
543 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
544 d.getVar('SSTATE_DIR'),
545 sstate_out, d,
546 fixedlsbstring,
547 filterfile=tasklistfn)
548 else:
549 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
550 d.getVar('SSTATE_DIR'),
551 sstate_out, d,
552 fixedlsbstring,
553 filterfile=tasklistfn)
554
555 # We don't need sstate do_package files
556 for root, dirs, files in os.walk(sstate_out):
557 for name in files:
558 if name.endswith("_package.tar.zst"):
559 f = os.path.join(root, name)
560 os.remove(f)
561
562 # Write manifest file
563 # Note: at the moment we cannot include the env setup script here to keep
564 # it updated, since it gets modified during SDK installation (see
565    # sdk_ext_postinst() below), so the checksum we take here would always
566 # be different.
567 manifest_file_list = ['conf/*']
568 if d.getVar('BBMULTICONFIG') is not None:
569 manifest_file_list.append('conf/multiconfig/*')
570
571 esdk_manifest_excludes = (d.getVar('ESDK_MANIFEST_EXCLUDES') or '').split()
572 esdk_manifest_excludes_list = []
573 for exclude_item in esdk_manifest_excludes:
574 esdk_manifest_excludes_list += glob.glob(os.path.join(baseoutpath, exclude_item))
575 manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
576 with open(manifest_file, 'w') as f:
577 for item in manifest_file_list:
578 for fn in glob.glob(os.path.join(baseoutpath, item)):
579 if fn == manifest_file or os.path.isdir(fn):
580 continue
581 if fn in esdk_manifest_excludes_list:
582 continue
583 chksum = bb.utils.sha256_file(fn)
584 f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
585}
586
587def get_current_buildtools(d):
588 """Get the file name of the current buildtools installer"""
589 import glob
590 btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY'), '*-buildtools-nativesdk-standalone-*.sh'))
591 btfiles.sort(key=os.path.getctime)
592 return os.path.basename(btfiles[-1])
593
594def get_sdk_required_utilities(buildtools_fn, d):
595 """Find required utilities that aren't provided by the buildtools"""
596 sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
597 sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
598 sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
599 if buildtools_fn:
600 buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
601 filelist, _ = bb.process.run('%s -l' % buildtools_installer)
602 else:
603 buildtools_installer = None
604 filelist = ""
605 localdata = bb.data.createCopy(d)
606 localdata.setVar('SDKPATH', '.')
607 sdkpathnative = localdata.getVar('SDKPATHNATIVE')
608 sdkbindirs = [localdata.getVar('bindir_nativesdk'),
609 localdata.getVar('sbindir_nativesdk'),
610 localdata.getVar('base_bindir_nativesdk'),
611 localdata.getVar('base_sbindir_nativesdk')]
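    # Assumption: the buildtools installer's -l output is a "tar -tv"-style
    # listing, so field 6 (splitline[5] below) holds the archive member path.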
612 for line in filelist.splitlines():
613 splitline = line.split()
614 if len(splitline) > 5:
615 fn = splitline[5]
616 if not fn.startswith('./'):
617 fn = './%s' % fn
618 if fn.startswith(sdkpathnative):
619 relpth = '/' + os.path.relpath(fn, sdkpathnative)
620 for bindir in sdkbindirs:
621 if relpth.startswith(bindir):
622 relpth = os.path.relpath(relpth, bindir)
623 if relpth in sanity_required_utilities:
624 sanity_required_utilities.remove(relpth)
625 break
626 return ' '.join(sanity_required_utilities)
627
628install_tools() {
629 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
630 scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
631 for script in $scripts; do
632 for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
633 targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
634 test -e ${targetscriptfn} || ln -rs ${scriptfn} ${targetscriptfn}
635 done
636 done
637 # We can't use the same method as above because files in the sysroot won't exist at this point
638 # (they get populated from sstate on installation)
639 unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
640 if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
641 binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
642 ln -rs ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
643 fi
644 touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
645
646 # find latest buildtools-tarball and install it
647 if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
648 install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
649 fi
650
651 install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
652}
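# Note: the ":True" suffix in a file-checksums entry marks the file as expected
# to exist, so changes to ext-sdk-prepare.py re-trigger do_populate_sdk_ext.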
653do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True"
654
655sdk_ext_preinst() {
656 # Since bitbake won't run as root it doesn't make sense to try and install
657 # the extensible sdk as root.
658 if [ "`id -u`" = "0" ]; then
659 echo "ERROR: The extensible sdk cannot be installed as root."
660 exit 1
661 fi
662 if ! command -v locale > /dev/null; then
663 echo "ERROR: The installer requires the locale command, please install it first"
664 exit 1
665 fi
666	# Check that the LC_ALL locale set above is available
667 canonicalised_locale=`echo $LC_ALL | sed 's/UTF-8/utf8/'`
668 if ! locale -a | grep -q $canonicalised_locale ; then
669 echo "ERROR: the installer requires the $LC_ALL locale to be installed (but not selected), please install it first"
670 exit 1
671 fi
672 # The relocation script used by buildtools installer requires python
673 if ! command -v python3 > /dev/null; then
674 echo "ERROR: The installer requires python3, please install it first"
675 exit 1
676 fi
677 missing_utils=""
678 for util in ${SDK_REQUIRED_UTILITIES}; do
679 if ! command -v $util > /dev/null; then
680 missing_utils="$missing_utils $util"
681 fi
682 done
683 if [ -n "$missing_utils" ] ; then
684 echo "ERROR: the SDK requires the following missing utilities, please install them: $missing_utils"
685 exit 1
686 fi
687 SDK_EXTENSIBLE="1"
688 if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
689 EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
690 fi
691}
692SDK_PRE_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_preinst}"
693
694# FIXME this preparation should be done as part of the SDK construction
695sdk_ext_postinst() {
696 printf "\nExtracting buildtools...\n"
697 cd $target_sdk_dir
698 env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
699 if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
700 printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
701
702 # Delete the buildtools tar file since it won't be used again
703 rm -f ./${SDK_BUILDTOOLS_INSTALLER}
704 # We don't need the log either since it succeeded
705 rm -f buildtools.log
706
707 # Make sure when the user sets up the environment, they also get
708 # the buildtools-tarball tools in their path.
709 echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script
710 echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script
711 echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
712 echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script
713 fi
714
715	# Allow bitbake environment setup to be run as part of this sdk.
716 echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
717 # Work around runqemu not knowing how to get this information within the eSDK
718 echo "export DEPLOY_DIR_IMAGE=$target_sdk_dir/tmp/${@os.path.relpath(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('TMPDIR'))}" >> $env_setup_script
719
720 # A bit of another hack, but we need this in the path only for devtool
721 # so put it at the end of $PATH.
722 echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
723
724 echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
725
726 # Warn if trying to use external bitbake and the ext SDK together
727 echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
728
729 if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
730 printf "Preparing build system...\n"
731 # dash which is /bin/sh on Ubuntu will not preserve the
732		# current working directory when first run, nor will it set $1 when
733 # sourcing a script. That is why this has to look so ugly.
734 LOGFILE="$target_sdk_dir/preparing_build_system.log"
735 sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
736 fi
737 if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
738 rm $target_sdk_dir/ext-sdk-prepare.py
739 fi
740 echo done
741}
742
743SDK_POST_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_postinst}"
744
745SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem; install_tools; "
746
747SDK_INSTALL_TARGETS = ""
748fakeroot python do_populate_sdk_ext() {
749 # FIXME hopefully we can remove this restriction at some point, but uninative
750 # currently forces this upon us
751 if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
752 bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
753
754 # FIXME hopefully we can remove this restriction at some point, but the eSDK
755 # can only be built for the primary (default) multiconfig
756 if d.getVar('BB_CURRENT_MC') != 'default':
757 bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC'))
758
759 # eSDK dependencies don't use the traditional variables and things don't work properly if they are set
760 d.setVar("TOOLCHAIN_HOST_TASK", "${TOOLCHAIN_HOST_TASK_ESDK}")
761 d.setVar("TOOLCHAIN_TARGET_TASK", "")
762
763 d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
764 if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
765 buildtools_fn = get_current_buildtools(d)
766 else:
767 buildtools_fn = None
768 d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
769 d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
770 d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
771 # ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
772 d.delVar('SDKIMAGE_LINGUAS')
773 if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
774 generate_nativesdk_lockedsigs(d)
775 populate_sdk_common(d)
776}
777
778def generate_nativesdk_lockedsigs(d):
779 import oe.copy_buildsystem
780 sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
781 oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
782
783def get_ext_sdk_depends(d):
784 # Note: the deps varflag is a list not a string, so we need to specify expand=False
785 deps = d.getVarFlag('do_image_complete', 'deps', False)
786 pn = d.getVar('PN')
787 deplist = ['%s:%s' % (pn, dep) for dep in deps]
788 tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
789 tasklist.append('do_rootfs')
790 for task in tasklist:
791 deplist.extend((d.getVarFlag(task, 'depends') or '').split())
792 return ' '.join(deplist)
793
794python do_sdk_depends() {
795 # We have to do this separately in its own task so we avoid recursing into
796 # dependencies we don't need to (e.g. buildtools-tarball) and bringing those
797 # into the SDK's sstate-cache
798 import oe.copy_buildsystem
799 sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
800 oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
801}
802addtask sdk_depends
803
804do_sdk_depends[dirs] = "${WORKDIR}"
805do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
806do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
807do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
808do_sdk_depends[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('TOOLCHAIN_HOST_TASK_ESDK').split()])}"
809
810do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
811
812do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
813 ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \
814 ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
815 ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
816
817# We must avoid depending on do_build here if rm_work.bbclass is active,
818# because otherwise do_rm_work may run before do_populate_sdk_ext itself.
819# We can't mark do_populate_sdk_ext and do_sdk_depends as having to
820# run before do_rm_work, because then they would also run as part
821# of normal builds.
822do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':' + (d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build') for x in d.getVar('SDK_TARGETS').split()])}"
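# For illustration (assumed values, not from this file): with SDK_TARGETS =
# "core-image-minimal" and RM_WORK_BUILD_WITHOUT = "do_image_complete", the
# expression above expands to "core-image-minimal:do_image_complete".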
823
824# Make sure code changes can result in rebuild
825do_populate_sdk_ext[vardeps] += "copy_buildsystem \
826 sdk_ext_postinst"
827
828# Since any change in the metadata of any layer should cause a rebuild of the
829# sdk (since the layers are put in the sdk), set the task to nostamp so it
830# always runs.
831do_populate_sdk_ext[nostamp] = "1"
832
833SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
834
835SSTATETASKS += "do_populate_sdk_ext"
836SSTATE_SKIP_CREATION:task-populate-sdk-ext = '1'
837do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
838do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
839do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
840do_populate_sdk_ext[stamp-extra-info] = "${MACHINE_ARCH}"
841
842addtask populate_sdk_ext after do_sdk_depends
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
deleted file mode 100644
index d4ad22d85d..0000000000
--- a/meta/classes/ptest-gnome.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit ptest
8
9EXTRA_OECONF:append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
10
11FILES:${PN}-ptest += "${libexecdir}/installed-tests/ \
12 ${datadir}/installed-tests/"
13
14RDEPENDS:${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes/ptest-perl.bbclass b/meta/classes/ptest-perl.bbclass
deleted file mode 100644
index c283fdd1fc..0000000000
--- a/meta/classes/ptest-perl.bbclass
+++ /dev/null
@@ -1,36 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit ptest
8
9FILESEXTRAPATHS:prepend := "${COREBASE}/meta/files:"
10
11SRC_URI += "file://ptest-perl/run-ptest"
12
13do_install_ptest_perl() {
14 install -d ${D}${PTEST_PATH}
15 if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
16 install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
17 fi
18 cp -r ${B}/t ${D}${PTEST_PATH}
19 chown -R root:root ${D}${PTEST_PATH}
20}
21
22FILES:${PN}-ptest:prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
23
24RDEPENDS:${PN}-ptest:prepend = "perl "
25
26addtask install_ptest_perl after do_install_ptest_base before do_package
27
28python () {
29 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
30 d.setVarFlag('do_install_ptest_perl', 'fakeroot', '1')
31
32 # Remove all '*ptest_perl' tasks when ptest is not enabled
33 if not(d.getVar('PTEST_ENABLED') == "1"):
34 for i in ['do_install_ptest_perl']:
35 bb.build.deltask(i, d)
36}
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
deleted file mode 100644
index 0383206a6d..0000000000
--- a/meta/classes/ptest.bbclass
+++ /dev/null
@@ -1,142 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SUMMARY:${PN}-ptest ?= "${SUMMARY} - Package test files"
8DESCRIPTION:${PN}-ptest ?= "${DESCRIPTION} \
9This package contains a test directory ${PTEST_PATH} for package test purposes."
10
11PTEST_PATH ?= "${libdir}/${BPN}/ptest"
12PTEST_BUILD_HOST_FILES ?= "Makefile"
13PTEST_BUILD_HOST_PATTERN ?= ""
14PTEST_PARALLEL_MAKE ?= "${PARALLEL_MAKE}"
15PTEST_PARALLEL_MAKEINST ?= "${PARALLEL_MAKEINST}"
16EXTRA_OEMAKE:prepend:task-compile-ptest-base = "${PTEST_PARALLEL_MAKE} "
17EXTRA_OEMAKE:prepend:task-install-ptest-base = "${PTEST_PARALLEL_MAKEINST} "
18
19FILES:${PN}-ptest += "${PTEST_PATH}"
20SECTION:${PN}-ptest = "devel"
21ALLOW_EMPTY:${PN}-ptest = "1"
22PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
23PTEST_ENABLED:class-native = ""
24PTEST_ENABLED:class-nativesdk = ""
25PTEST_ENABLED:class-cross-canadian = ""
26RDEPENDS:${PN}-ptest += "${PN}"
27RDEPENDS:${PN}-ptest:class-native = ""
28RDEPENDS:${PN}-ptest:class-nativesdk = ""
29RRECOMMENDS:${PN}-ptest += "ptest-runner"
30
31PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
32
33require conf/distro/include/ptest-packagelists.inc
34
35do_configure_ptest() {
36 :
37}
38
39do_configure_ptest_base() {
40 do_configure_ptest
41}
42
43do_compile_ptest() {
44 :
45}
46
47do_compile_ptest_base() {
48 do_compile_ptest
49}
50
51do_install_ptest() {
52 :
53}
54
55do_install_ptest_base() {
56 if [ -f ${WORKDIR}/run-ptest ]; then
57 install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
58 fi
59 if grep -q install-ptest: Makefile; then
60 oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
61 fi
62 do_install_ptest
63 chown -R root:root ${D}${PTEST_PATH}
64
65 # Strip build host paths from any installed Makefile
66 for filename in ${PTEST_BUILD_HOST_FILES}; do
67 for installed_ptest_file in $(find ${D}${PTEST_PATH} -type f -name $filename); do
68 bbnote "Stripping host paths from: $installed_ptest_file"
69 sed -e 's#${HOSTTOOLS_DIR}/*##g' \
70 -e 's#${WORKDIR}/*=#.=#g' \
71 -e 's#${WORKDIR}/*##g' \
72 -i $installed_ptest_file
73 if [ -n "${PTEST_BUILD_HOST_PATTERN}" ]; then
74 sed -E '/${PTEST_BUILD_HOST_PATTERN}/d' \
75 -i $installed_ptest_file
76 fi
77 done
78 done
79}
80
81PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
82
83# This function needs to run after apply_update_alternative_renames because the
84# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
85# used here to make this function to run as late as possible.
86PACKAGE_PREPROCESS_FUNCS:append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
87 bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
88
89python ptest_update_alternatives() {
90 """
91 This function will generate the symlinks in the PTEST_BINDIR_PKGD_PATH
92 to match the renamed binaries by update-alternatives.
93 """
94
95 if not bb.data.inherits_class('update-alternatives', d) \
96 or not update_alternatives_enabled(d):
97 return
98
99 bb.note("Generating symlinks for ptest")
100 bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"),
101 d.getVar("sbindir"), d.getVar("base_sbindir") }
102 ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH")
103 os.mkdir(ptest_bindir)
104 for pkg in (d.getVar('PACKAGES') or "").split():
105 alternatives = update_alternatives_alt_targets(d, pkg)
106 for alt_name, alt_link, alt_target, _ in alternatives:
107 # Some alternatives are for man pages,
108 # check if the alternative is in PATH
109 if os.path.dirname(alt_link) in bin_paths:
110 os.symlink(alt_target, os.path.join(ptest_bindir, alt_name))
111}
112
113do_configure_ptest_base[dirs] = "${B}"
114do_compile_ptest_base[dirs] = "${B}"
115do_install_ptest_base[dirs] = "${B}"
116do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
117
118addtask configure_ptest_base after do_configure before do_compile
119addtask compile_ptest_base after do_compile before do_install
120addtask install_ptest_base after do_install before do_package do_populate_sysroot
121
122python () {
123 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
124 d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
125 d.setVarFlag('do_install_ptest_base', 'umask', '022')
126
127 # Remove all '*ptest_base' tasks when ptest is not enabled
128 if not(d.getVar('PTEST_ENABLED') == "1"):
129 for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
130 bb.build.deltask(i, d)
131}
132
133QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest"
134def package_qa_check_missing_ptest(pn, d, messages):
135 # This checks that ptest package is actually included
136 # in standard oe-core ptest images - only for oe-core recipes
137 if not 'meta/recipes' in d.getVar('FILE') or not(d.getVar('PTEST_ENABLED') == "1"):
138 return
139
140 enabled_ptests = " ".join([d.getVar('PTESTS_FAST'), d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split()
141 if (pn + "-ptest").replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
142 oe.qa.handle_error("missing-ptest", "supports ptests but is not included in oe-core's ptest-packagelists.inc", d)
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
deleted file mode 100644
index aab04c638f..0000000000
--- a/meta/classes/pypi.bbclass
+++ /dev/null
@@ -1,34 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def pypi_package(d):
8 bpn = d.getVar('BPN')
9 if bpn.startswith('python-'):
10 return bpn[7:]
11 elif bpn.startswith('python3-'):
12 return bpn[8:]
13 return bpn
14
15PYPI_PACKAGE ?= "${@pypi_package(d)}"
16PYPI_PACKAGE_EXT ?= "tar.gz"
17PYPI_ARCHIVE_NAME ?= "${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}"
18
19def pypi_src_uri(d):
20 package = d.getVar('PYPI_PACKAGE')
21 archive_name = d.getVar('PYPI_ARCHIVE_NAME')
22 return 'https://files.pythonhosted.org/packages/source/%s/%s/%s' % (package[0], package, archive_name)
23
24PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
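# Illustrative example (hypothetical recipe): with BPN = "python3-requests" and
# PV = "2.28.1", PYPI_SRC_URI expands to
# https://files.pythonhosted.org/packages/source/r/requests/requests-2.28.1.tar.gz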
25
26HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
27SECTION = "devel/python"
28SRC_URI:prepend = "${PYPI_SRC_URI} "
29S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
30
31UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
32UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
33
34CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
diff --git a/meta/classes/python3-dir.bbclass b/meta/classes/python3-dir.bbclass
deleted file mode 100644
index 912c67253c..0000000000
--- a/meta/classes/python3-dir.bbclass
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PYTHON_BASEVERSION = "3.10"
8PYTHON_ABI = ""
9PYTHON_DIR = "python${PYTHON_BASEVERSION}"
10PYTHON_PN = "python3"
11PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
deleted file mode 100644
index 654a002fdb..0000000000
--- a/meta/classes/python3native.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3-dir
8
9PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
10EXTRANATIVEPATH += "python3-native"
11DEPENDS:append = " python3-native "
12
13# python-config and other scripts are using sysconfig modules
14# which we patch to access these variables
15export STAGING_INCDIR
16export STAGING_LIBDIR
17
18# Packages can use
19# find_package(PythonInterp REQUIRED)
20# find_package(PythonLibs REQUIRED)
21# which ends up using libs/includes from build host
22# Therefore pre-empt that effort
23export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
24export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
25
26# suppress host user's site-packages dirs.
27export PYTHONNOUSERSITE = "1"
28
29# autoconf macros will use their internal default preference otherwise
30export PYTHON
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass
deleted file mode 100644
index 3f89e5e09e..0000000000
--- a/meta/classes/python3targetconfig.bbclass
+++ /dev/null
@@ -1,35 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native
8
9EXTRA_PYTHON_DEPENDS ?= ""
10EXTRA_PYTHON_DEPENDS:class-target = "python3"
11DEPENDS:append = " ${EXTRA_PYTHON_DEPENDS}"
12
13do_configure:prepend:class-target() {
14 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
15}
16
17do_compile:prepend:class-target() {
18 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
19}
20
21do_install:prepend:class-target() {
22 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
23}
24
25do_configure:prepend:class-nativesdk() {
26 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
27}
28
29do_compile:prepend:class-nativesdk() {
30 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
31}
32
33do_install:prepend:class-nativesdk() {
34 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
35}
diff --git a/meta/classes/python_flit_core.bbclass b/meta/classes/python_flit_core.bbclass
deleted file mode 100644
index a0b1feb70a..0000000000
--- a/meta/classes/python_flit_core.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pep517 python3native python3-dir setuptools3-base
8
9DEPENDS += "python3 python3-flit-core-native"
10
11python_flit_core_do_manual_build () {
12 cd ${PEP517_SOURCE_PATH}
13 nativepython3 -m flit_core.wheel --outdir ${PEP517_WHEEL_PATH} .
14}
diff --git a/meta/classes/python_hatchling.bbclass b/meta/classes/python_hatchling.bbclass
deleted file mode 100644
index b9e6582eb5..0000000000
--- a/meta/classes/python_hatchling.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pep517 python3native python3-dir setuptools3-base
8
9DEPENDS += "python3-hatchling-native"
diff --git a/meta/classes/python_pep517.bbclass b/meta/classes/python_pep517.bbclass
deleted file mode 100644
index 202dde0bc3..0000000000
--- a/meta/classes/python_pep517.bbclass
+++ /dev/null
@@ -1,60 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Common infrastructure for Python packages that use PEP-517 compliant packaging.
8# https://www.python.org/dev/peps/pep-0517/
9#
10# This class will build a wheel in do_compile, and use pypa/installer to install
11# it in do_install.
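#
# Minimal usage sketch (illustrative, not part of this class): a setuptools
# based recipe would normally just "inherit python_setuptools_build_meta",
# which in turn inherits python_pep517 (see that class later in this series).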
12
13DEPENDS:append = " python3-picobuild-native python3-installer-native"
14
15# Where to execute the build process from
16PEP517_SOURCE_PATH ?= "${S}"
17
18# The directory where wheels will be written
19PEP517_WHEEL_PATH ?= "${WORKDIR}/dist"
20
21PEP517_PICOBUILD_OPTS ?= ""
22
23# The interpreter to use for installed scripts
24PEP517_INSTALL_PYTHON = "python3"
25PEP517_INSTALL_PYTHON:class-native = "nativepython3"
26
27# pypa/installer option to control the bytecode compilation
28INSTALL_WHEEL_COMPILE_BYTECODE ?= "--compile-bytecode=0"
29
30# PEP517 doesn't have a specific configure step, so set an empty do_configure to avoid
31# running base_do_configure.
32python_pep517_do_configure () {
33 :
34}
35
36# When we have Python 3.11 we can parse pyproject.toml to determine the build
37# API entry point directly
38python_pep517_do_compile () {
39 nativepython3 -m picobuild --source ${PEP517_SOURCE_PATH} --dest ${PEP517_WHEEL_PATH} --wheel ${PEP517_PICOBUILD_OPTS}
40}
41do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
42
43python_pep517_do_install () {
44 COUNT=$(find ${PEP517_WHEEL_PATH} -name '*.whl' | wc -l)
45 if test $COUNT -eq 0; then
46 bbfatal No wheels found in ${PEP517_WHEEL_PATH}
47 elif test $COUNT -gt 1; then
48 bbfatal More than one wheel found in ${PEP517_WHEEL_PATH}, this should not happen
49 fi
50
51 nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl
52}
53
54# A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native.
55python_pep517_do_bootstrap_install () {
56 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
57 unzip -d ${D}${PYTHON_SITEPACKAGES_DIR} ${PEP517_WHEEL_PATH}/*.whl
58}
59
60EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/python_poetry_core.bbclass b/meta/classes/python_poetry_core.bbclass
deleted file mode 100644
index c7dc5d0382..0000000000
--- a/meta/classes/python_poetry_core.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pep517 python3native setuptools3-base
8
9DEPENDS += "python3-poetry-core-native"
diff --git a/meta/classes/python_pyo3.bbclass b/meta/classes/python_pyo3.bbclass
deleted file mode 100644
index 9a32eac6fd..0000000000
--- a/meta/classes/python_pyo3.bbclass
+++ /dev/null
@@ -1,36 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class helps make sure that Python extensions built with PyO3
9# and setuptools_rust properly set up the environment for cross compilation
10#
11
12inherit cargo python3-dir siteinfo
13
14export PYO3_CROSS="1"
15export PYO3_CROSS_PYTHON_VERSION="${PYTHON_BASEVERSION}"
16export PYO3_CROSS_LIB_DIR="${STAGING_LIBDIR}"
17export CARGO_BUILD_TARGET="${RUST_HOST_SYS}"
18export RUSTFLAGS
19export PYO3_PYTHON="${PYTHON}"
20export PYO3_CONFIG_FILE="${WORKDIR}/pyo3.config"
21
22python_pyo3_do_configure () {
23 cat > ${WORKDIR}/pyo3.config << EOF
24implementation=CPython
25version=${PYTHON_BASEVERSION}
26shared=true
27abi3=false
28lib_name=${PYTHON_DIR}
29lib_dir=${STAGING_LIBDIR}
30pointer_width=${SITEINFO_BITS}
31build_flags=WITH_THREAD
32suppress_build_script_link_lines=false
33EOF
34}
35
36EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes/python_setuptools3_rust.bbclass b/meta/classes/python_setuptools3_rust.bbclass
deleted file mode 100644
index d6ce2edb96..0000000000
--- a/meta/classes/python_setuptools3_rust.bbclass
+++ /dev/null
@@ -1,17 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pyo3 setuptools3
8
9DEPENDS += "python3-setuptools-rust-native"
10
11python_setuptools3_rust_do_configure() {
12 python_pyo3_do_configure
13 cargo_common_do_configure
14 setuptools3_do_configure
15}
16
17EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes/python_setuptools_build_meta.bbclass b/meta/classes/python_setuptools_build_meta.bbclass
deleted file mode 100644
index 4c84d1e8d0..0000000000
--- a/meta/classes/python_setuptools_build_meta.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit setuptools3-base python_pep517
8
9DEPENDS += "python3-setuptools-native python3-wheel-native"
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
deleted file mode 100644
index 874b15127c..0000000000
--- a/meta/classes/qemu.bbclass
+++ /dev/null
@@ -1,77 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class contains functions for recipes that need QEMU or test for its
9# existence.
10#
11
12def qemu_target_binary(data):
13 package_arch = data.getVar("PACKAGE_ARCH")
14 qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch) or "")
15 if qemu_target_binary:
16 return qemu_target_binary
17
18 target_arch = data.getVar("TARGET_ARCH")
19 if target_arch in ("i486", "i586", "i686"):
20 target_arch = "i386"
21 elif target_arch == "powerpc":
22 target_arch = "ppc"
23 elif target_arch == "powerpc64":
24 target_arch = "ppc64"
25 elif target_arch == "powerpc64le":
26 target_arch = "ppc64le"
27
28 return "qemu-" + target_arch
29
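# For example (illustrative), TARGET_ARCH "i586" maps to "qemu-i386" and
# "powerpc64le" to "qemu-ppc64le", unless a QEMU_TARGET_BINARY_<package arch>
# override is set.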
30def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
31 import string
32
33 qemu_binary = qemu_target_binary(data)
34 if qemu_binary == "qemu-allarch":
35 qemu_binary = "qemuwrapper"
36
37 qemu_options = data.getVar("QEMU_OPTIONS")
38
39 return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
40 + " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
41
42# Next function will return a string containing the command that is needed to
43# to run a certain binary through qemu. For example, in order to make a certain
44# postinstall scriptlet run at do_rootfs time and running the postinstall is
45# architecture dependent, we can run it through qemu. For example, in the
46# postinstall scriptlet, we could use the following:
47#
48# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
49#
50def qemu_run_binary(data, rootfs_path, binary):
51 libdir = rootfs_path + data.getVar("libdir", False)
52 base_libdir = rootfs_path + data.getVar("base_libdir", False)
53
54 return qemu_wrapper_cmdline(data, rootfs_path, [libdir, base_libdir]) + rootfs_path + binary
55
56# QEMU_EXTRAOPTIONS is not meant to be directly used, the extensions are
57# PACKAGE_ARCH, *NOT* overrides.
58# In some cases (e.g. ppc) simply being arch specific (apparently) isn't good
59# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do
60# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
61# qemu-arm default CPU supports all required architecture levels.
62
63QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')) or ""}"
64QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
65
66QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
67QEMU_EXTRAOPTIONS_ppce500mc = " -cpu e500mc"
68QEMU_EXTRAOPTIONS_ppce5500 = " -cpu e500mc"
69QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
70QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
71QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
72QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
73QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER9"
74# Some packages, e.g. fwupd, set PACKAGE_ARCH = MACHINE_ARCH and use meson, which
75# needs the right options for usermode qemu
76QEMU_EXTRAOPTIONS_qemuppc = " -cpu 7400"
77QEMU_EXTRAOPTIONS_qemuppc64 = " -cpu POWER9"
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
deleted file mode 100644
index 018c000ca2..0000000000
--- a/meta/classes/qemuboot.bbclass
+++ /dev/null
@@ -1,171 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Help runqemu boot the target board. "QB" means Qemu Boot. The following
8# vars can be set in conf files, such as <bsp.conf>, so that the image can
9# be booted by runqemu:
10#
11# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
12#
13# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
14#
15# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
16#
17# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
18#
19# QB_MEM: memory, e.g., "-m 512"
20#
21# QB_MACHINE: qemu machine, e.g., "-machine virt"
22#
23# QB_CPU: qemu cpu, e.g., "-cpu qemu32"
24#
25# QB_CPU_KVM: similar to QB_CPU, but used when KVM is enabled, e.g., '-cpu kvm64';
26# set it when KVM is supported.
27#
28# QB_SMP: amount of CPU cores inside qemu guest, each mapped to a thread on the host,
29# e.g. "-smp 8".
30#
31# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
32# option, e.g., "console=ttyS0 console=tty"
33#
34# QB_DTB: qemu dtb name
35#
36# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa"; set it when audio is supported
37#
38# QB_AUDIO_OPT: qemu audio option, e.g., "-device AC97", used
39# when QB_AUDIO_DRV is set.
40#
41# QB_RNG: pass-through for the host random number generator; it can speed up boot
42# in system mode, where the system is experiencing entropy starvation
43#
44# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
45# By default "/dev/vda rw" gets passed to the kernel.
46# To mount the rootfs read-only QB_KERNEL_ROOT can be set to e.g. "/dev/vda ro".
47#
48# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
49# it needs to work with QB_TAP_OPT and QB_SLIRP_OPT.
50# Note, runqemu will replace @MAC@ with a predefined mac, you can set
51# a custom one, but that may cause conflicts when multiple qemus are
52# running on the same host.
53# Note: If more than one interface of type -device virtio-net-device gets added,
54# QB_NETWORK_DEVICE:prepend might be used, since Qemu enumerates the eth*
55# devices in reverse order to -device arguments.
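#       Illustrative example (assumed values) of prepending a second interface:
#         QB_NETWORK_DEVICE:prepend = "-device virtio-net-device,netdev=net1,mac=@MAC@ "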
56#
57# QB_TAP_OPT: network option for 'tap' mode, e.g.,
58# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
59# Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ...
60#
61# QB_SLIRP_OPT: network option for SLIRP mode, e.g., -netdev user,id=net0"
62#
63# QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the
64# ip= kernel command line argument needs to be changed accordingly. Details are documented
65# in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
66# Example to configure only the first interface: "ip=eth0:dhcp"
67# QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires
68# static IP configuration @CLIENT@ and @GATEWAY@ place holders are replaced by the IP and the gateway
69# address of the qemu guest by runqemu.
70# Example: "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0"
71#
72# QB_ROOTFS_OPT: used as rootfs, e.g.,
73# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
74# Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4.
75#
76# QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio"
77#
78# QB_TCPSERIAL_OPT: tcp serial port option, e.g.,
79# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
80# Note, runqemu will replace "@PORT@" with the port number which is used.
81#
82# QB_ROOTFS_EXTRA_OPT: extra options to be appended to the rootfs device in case there is none specified by QB_ROOTFS_OPT.
83# Can be used to automatically determine the image from the other variables
84# but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs
85# without the need to specify a dedicated qemu configuration
86#
87# QB_GRAPHICS: QEMU video card type (e.g. "-vga std")
88#
89# Usage:
90# IMAGE_CLASSES += "qemuboot"
91# See "runqemu help" for more info
92
93QB_MEM ?= "-m 256"
94QB_SMP ?= ""
95QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
96QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
97QB_DEFAULT_FSTYPE ?= "ext4"
98QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
99QB_OPT_APPEND ?= ""
100QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
101QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
102QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
103QB_ROOTFS_EXTRA_OPT ?= ""
104QB_GRAPHICS ?= ""
105
106# This should be kept aligned with ROOT_VM
107QB_DRIVE_TYPE ?= "/dev/sd"
108
109inherit image-artifact-names
110
111# Create qemuboot.conf
112addtask do_write_qemuboot_conf after do_rootfs before do_image
113
114def qemuboot_vars(d):
115 build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
116 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
117 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
118 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER']
119 return build_vars + [k for k in d.keys() if k.startswith('QB_')]
120
121do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
122do_write_qemuboot_conf[vardepsexclude] += "TOPDIR"
123python do_write_qemuboot_conf() {
124 import configparser
125
126 qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
127 if d.getVar('IMAGE_LINK_NAME'):
128 qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
129 else:
130 qemuboot_link = ""
131 finalpath = d.getVar("DEPLOY_DIR_IMAGE")
132 topdir = d.getVar('TOPDIR')
133 cf = configparser.ConfigParser()
134 cf.add_section('config_bsp')
135 for k in sorted(qemuboot_vars(d)):
136 if ":" in k:
137 continue
138 # qemu-helper-native sysroot is not removed by rm_work and
139 # contains all tools required by runqemu
140 if k == 'STAGING_BINDIR_NATIVE':
141 val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'),
142 'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
143 else:
144 val = d.getVar(k)
145 if val is None:
146 continue
147 # we only want to write out relative paths so that we can relocate images
148 # and still run them
149 if val.startswith(topdir):
150 val = os.path.relpath(val, finalpath)
151 cf.set('config_bsp', k, '%s' % val)
152
153 # QB_DEFAULT_KERNEL's default value, KERNEL_IMAGETYPE, is the name of a symlink
154 # to the kernel file, which hinders relocatability of the qb conf.
155 # Read the link and replace it with the full filename of the target.
156 kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('QB_DEFAULT_KERNEL'))
157 kernel = os.path.realpath(kernel_link)
158 # we only want to write out relative paths so that we can relocate images
159 # and still run them
160 kernel = os.path.relpath(kernel, finalpath)
161 cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
162
163 bb.utils.mkdirhier(os.path.dirname(qemuboot))
164 with open(qemuboot, 'w') as f:
165 cf.write(f)
166
167 if qemuboot_link and qemuboot_link != qemuboot:
168 if os.path.lexists(qemuboot_link):
169 os.remove(qemuboot_link)
170 os.symlink(os.path.basename(qemuboot), qemuboot_link)
171}
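
For illustration, a minimal sketch of how a BSP might tune the variables documented above from its machine configuration (the values here are placeholders drawn from the examples in the comments, not from this diff):

QB_MEM = "-m 512"
QB_GRAPHICS = "-vga std"
QB_TAP_OPT = "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
QB_SERIAL_OPT = "-serial mon:stdio"

runqemu then picks these settings up from the <image>.qemuboot.conf file written by do_write_qemuboot_conf above.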
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
deleted file mode 100644
index d40adf5f0e..0000000000
--- a/meta/classes/rootfs-postcommands.bbclass
+++ /dev/null
@@ -1,440 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Zap the root password if debug-tweaks and empty-root-password features are not enabled
8ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
9
10# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
11ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
12
13# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled
14ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login; ", "",d)}'
15
16# Enable postinst logging if debug-tweaks or post-install-logging is enabled
17ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
18
19# Create /etc/timestamp during image construction to give a reasonably sane default time setting
20ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
21
22# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
23ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
24
25# We also need to do the same for the kernel boot parameters,
26# otherwise the kernel or initramfs ends up mounting the rootfs read/write
27# (the default) if supported by the underlying storage.
28#
29# We do this with :append because the default value might get set later with ?=
30# and we don't want to disable such a default by setting a value here.
31APPEND:append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
32
33# Generates test data file with data store variables expanded in json format
34ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
35
36# Write manifest
37IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
38ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
39# Set default postinst log file
40POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
41# Set default target for systemd images
42SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}'
43ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
44
45ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
46
47ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check; overlayfs_postprocess;", "", d)}'
48
49inherit image-artifact-names
50
51# Sort the user and group entries in /etc by ID in order to make the content
52# deterministic. Package installs are not deterministic, causing the ordering
53# of entries to change between builds. If this isn't desired,
54# the command can be overridden.
55#
56# Note that useradd-staticids.bbclass has to be used to ensure that
57# the numeric IDs of dynamically created entries remain stable.
58#
59# We want this to run as late as possible, in particular after
60# systemd_sysusers_create and set_user_group. Using :append is not
61# enough for that, set_user_group is added that way and would end
62# up running after us.
63SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
64python () {
65 d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
66 d.appendVar('ROOTFS_POSTPROCESS_COMMAND', 'rootfs_reproducible;')
67}
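
Because SORT_PASSWD_POSTPROCESS_COMMAND uses a weak ??= default, a configuration that does not want the deterministic sorting can simply clear it, e.g.:

SORT_PASSWD_POSTPROCESS_COMMAND = ""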
68
69systemd_create_users () {
70 for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/*.conf; do
71 [ -e $conffile ] || continue
72 grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
73 if [ "$type" = "u" ]; then
74 useradd_params="--shell /sbin/nologin"
75 [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
76 [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
77 useradd_params="$useradd_params --system $name"
78 eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
79 elif [ "$type" = "g" ]; then
80 groupadd_params=""
81 [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
82 groupadd_params="$groupadd_params --system $name"
83 eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
84 elif [ "$type" = "m" ]; then
85 group=$id
86 eval groupadd --root ${IMAGE_ROOTFS} --system $group || true
87 eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name --no-user-group || true
88 eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
89 fi
90 done
91 done
92}
93
94#
95# A hook function to support read-only-rootfs IMAGE_FEATURES
96#
97read_only_rootfs_hook () {
98 # Tweak the mount option and fs_passno for rootfs in fstab
99 if [ -f ${IMAGE_ROOTFS}/etc/fstab ]; then
100 sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
101 fi
102
103 # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab
104 if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then
105 sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab
106 fi
107
108 # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
109 # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
110 # and the keys under /var/run/ssh.
111 if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
112 if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
113 echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
114 echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
115 else
116 echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
117 echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
118 fi
119 fi
120
121 # Also tweak the key location for dropbear in the same way.
122 if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
123 if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
124 echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
125 fi
126 fi
127
128 if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
129 # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
130 if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
131 sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
132 fi
133 # Run populate-volatile.sh at rootfs time to set up basic files
134 # and directories to support read-only rootfs.
135 if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
136 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
137 fi
138 fi
139
140 if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
141 # Create machine-id
142 # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
143 touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
144 fi
145}
146
147#
148# This function disallows empty root passwords
149#
150zap_empty_root_password () {
151 if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
152 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
153 fi
154 if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
155 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
156 fi
157}
158
159#
160# allow dropbear/openssh to accept logins from accounts with an empty password string
161#
162ssh_allow_empty_password () {
163 for config in sshd_config sshd_config_readonly; do
164 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
165 sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
166 fi
167 done
168
169 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
170 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
171 if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
172 sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
173 fi
174 else
175 printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
176 fi
177 fi
178
179 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
180 for f in `find ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* -type f -exec test -e {} \; -print`
181 do
182 sed -i 's/nullok_secure/nullok/' $f
183 done
184 fi
185}
186
187#
188# allow dropbear/openssh to accept root logins
189#
190ssh_allow_root_login () {
191 for config in sshd_config sshd_config_readonly; do
192 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
193 sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
194 fi
195 done
196
197 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
198 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
199 sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
200 fi
201 fi
202}
203
204python sort_passwd () {
205 import rootfspostcommands
206 rootfspostcommands.sort_passwd(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
207}
208
209#
210# Enable postinst logging
211#
212postinst_enable_logging () {
213 mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
214 echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
215 echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
216}
217
218#
219# Modify systemd default target
220#
221set_systemd_default_target () {
222 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ]; then
223 ln -sf ${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
224 fi
225}
226
227# If /var/volatile is not empty, we have seen problems where programs such as the
228# journal make assumptions based on the contents of /var/volatile. The journal
229# would then write to /var/volatile before it was mounted, thus hiding the
230# items previously written.
231#
232# This change is to attempt to fix those types of issues in a way that doesn't
233# affect users that may not be using /var/volatile.
234empty_var_volatile () {
235 if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
236 match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
237 if [ -n "$match" ]; then
238 find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
239 fi
240 fi
241}
242
243# Turn any symbolic /sbin/init link into a file
244remove_init_link () {
245 if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
246 LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
247 rm ${IMAGE_ROOTFS}/sbin/init
248 cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
249 fi
250}
251
252make_zimage_symlink_relative () {
253 if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
254 (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
255 fi
256}
257
258python write_image_manifest () {
259 from oe.rootfs import image_list_installed_packages
260 from oe.utils import format_pkg_list
261
262 deploy_dir = d.getVar('IMGDEPLOYDIR')
263 link_name = d.getVar('IMAGE_LINK_NAME')
264 manifest_name = d.getVar('IMAGE_MANIFEST')
265
266 if not manifest_name:
267 return
268
269 pkgs = image_list_installed_packages(d)
270 with open(manifest_name, 'w+') as image_manifest:
271 image_manifest.write(format_pkg_list(pkgs, "ver"))
272
273 if os.path.exists(manifest_name) and link_name:
274 manifest_link = deploy_dir + "/" + link_name + ".manifest"
275 if manifest_link != manifest_name:
276 if os.path.lexists(manifest_link):
277 os.remove(manifest_link)
278 os.symlink(os.path.basename(manifest_name), manifest_link)
279}
280
281# Can be used to create /etc/timestamp during image construction to give a reasonably
282# sane default time setting
283rootfs_update_timestamp () {
284 if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
285 # Convert UTC into %4Y%2m%2d%2H%2M%2S
286 sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
287 else
288 sformatted=`date -u +%4Y%2m%2d%2H%2M%2S`
289 fi
290 echo $sformatted > ${IMAGE_ROOTFS}/etc/timestamp
291 bbnote "rootfs_update_timestamp: set /etc/timestamp to $sformatted"
292}
293
294# Prevent X from being started
295rootfs_no_x_startup () {
296 if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
297 chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
298 fi
299}
300
301rootfs_trim_schemas () {
302 for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
303 do
304 # Need this in case no files exist
305 if [ -e $schema ]; then
306 oe-trim-schemas $schema > $schema.new
307 mv $schema.new $schema
308 fi
309 done
310}
311
312rootfs_check_host_user_contaminated () {
313 contaminated="${S}/host-user-contaminated.txt"
314 HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
315 HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
316
317 find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \
318 -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated"
319
320 sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do
321 bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line`
322 done
323
324 if [ -s "$contaminated" ]; then
325 bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd`
326 bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group`
327 fi
328}
329
330# Make any absolute links in a sysroot relative
331rootfs_sysroot_relativelinks () {
332 sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
333}
334
335# Generated test data json file
336python write_image_test_data() {
337 from oe.data import export2json
338
339 deploy_dir = d.getVar('IMGDEPLOYDIR')
340 link_name = d.getVar('IMAGE_LINK_NAME')
341 testdata_name = os.path.join(deploy_dir, "%s.testdata.json" % d.getVar('IMAGE_NAME'))
342
343 searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
344 export2json(d, testdata_name, searchString=searchString, replaceString="")
345
346 if os.path.exists(testdata_name) and link_name:
347 testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
348 if testdata_link != testdata_name:
349 if os.path.lexists(testdata_link):
350 os.remove(testdata_link)
351 os.symlink(os.path.basename(testdata_name), testdata_link)
352}
353write_image_test_data[vardepsexclude] += "TOPDIR"
354
355# Check for unsatisfied recommendations (RRECOMMENDS)
356python rootfs_log_check_recommends() {
357 log_path = d.expand("${T}/log.do_rootfs")
358 with open(log_path, 'r') as log:
359 for line in log:
360 if 'log_check' in line:
361 continue
362
363 if 'unsatisfied recommendation for' in line:
364 bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line))
365}
366
367# Perform any additional adjustments needed to make the rootfs binary reproducible
368rootfs_reproducible () {
369 if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
370 # Convert UTC into %4Y%2m%2d%2H%2M%2S
371 sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
372 echo $sformatted > ${IMAGE_ROOTFS}/etc/version
373 bbnote "rootfs_reproducible: set /etc/version to $sformatted"
374
375 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
376 find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
377 sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
378 fi
379 fi
380}
381
382# Perform a dumb check for unit existence, not its validity
383python overlayfs_qa_check() {
384 from oe.overlayfs import mountUnitName
385
386 overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT") or {}
387 imagepath = d.getVar("IMAGE_ROOTFS")
388 sysconfdir = d.getVar("sysconfdir")
389 searchpaths = [oe.path.join(imagepath, sysconfdir, "systemd", "system"),
390 oe.path.join(imagepath, d.getVar("systemd_system_unitdir"))]
391 fstabpath = oe.path.join(imagepath, sysconfdir, "fstab")
392
393 if not any(os.path.exists(path) for path in [*searchpaths, fstabpath]):
394 return
395
396 fstabDevices = []
397 if os.path.isfile(fstabpath):
398 with open(fstabpath, 'r') as f:
399 for line in f:
400 if line[0] == '#':
401 continue
402 path = line.split(maxsplit=2)
403 if len(path) > 2:
404 fstabDevices.append(path[1])
405
406 allUnitExist = True
407 for mountPoint in overlayMountPoints:
408 qaSkip = (d.getVarFlag("OVERLAYFS_QA_SKIP", mountPoint) or "").split()
409 if "mount-configured" in qaSkip:
410 continue
411
412 mountPath = d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)
413 if mountPath in fstabDevices:
414 continue
415
416 mountUnit = mountUnitName(mountPath)
417 if any(os.path.isfile(oe.path.join(dirpath, mountUnit))
418 for dirpath in searchpaths):
419 continue
420
421 bb.warn(f'Mount path {mountPath} not found in fstab and unit '
422 f'{mountUnit} not found in systemd unit directories.')
423 bb.warn(f'Skip this check by setting OVERLAYFS_QA_SKIP[{mountPoint}] = '
424 '"mount-configured"')
425 allUnitExist = False
426
427 if not allUnitExist:
428 bb.fatal('Not all mount paths and units are installed in the image')
429}
430
431python overlayfs_postprocess() {
432 import shutil
433
434 # install helper script
435 helperScriptName = "overlayfs-create-dirs.sh"
436 helperScriptSource = oe.path.join(d.getVar("COREBASE"), "meta/files", helperScriptName)
437 helperScriptDest = oe.path.join(d.getVar("IMAGE_ROOTFS"), "/usr/sbin/", helperScriptName)
438 shutil.copyfile(helperScriptSource, helperScriptDest)
439 os.chmod(helperScriptDest, 0o755)
440}
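
As a usage sketch (the function and file names here are illustrative, not from this class): an image recipe can hook its own postprocessing into the same mechanism used by the hooks above:

ROOTFS_POSTPROCESS_COMMAND += "my_custom_tweak; "

my_custom_tweak () {
    # Runs against the image rootfs, like the hooks in this class.
    echo "example" > ${IMAGE_ROOTFS}${sysconfdir}/example-note
}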
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
deleted file mode 100644
index c5c6426abb..0000000000
--- a/meta/classes/rootfs_deb.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
1#
2# Copyright 2006-2007 Openedhand Ltd.
3#
4# SPDX-License-Identifier: MIT
5#
6
7ROOTFS_PKGMANAGE = "dpkg apt"
8
9do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
10do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
11do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
12do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
13
14do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
15do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
16do_populate_sdk_ext[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
17
18python rootfs_deb_bad_recommendations() {
19 if d.getVar("BAD_RECOMMENDATIONS"):
20 bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
21}
22do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
23
24DEB_POSTPROCESS_COMMANDS = ""
25
26opkglibdir = "${localstatedir}/lib/opkg"
27
28python () {
29 # Map SDK_ARCH to Debian's ideas about architectures
30 darch = d.getVar('SDK_ARCH')
31 if darch in ["x86", "i486", "i586", "i686", "pentium"]:
32 d.setVar('DEB_SDK_ARCH', 'i386')
33 elif darch == "x86_64":
34 d.setVar('DEB_SDK_ARCH', 'amd64')
35 elif darch == "arm":
36 d.setVar('DEB_SDK_ARCH', 'armel')
37 elif darch == "aarch64":
38 d.setVar('DEB_SDK_ARCH', 'arm64')
39 else:
40 bb.fatal("Unhandled SDK_ARCH %s" % darch)
41}
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
deleted file mode 100644
index a48ad07dfc..0000000000
--- a/meta/classes/rootfs_ipk.bbclass
+++ /dev/null
@@ -1,44 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Creates a root filesystem out of IPKs
9#
10# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs etc.
11# See image.bbclass for a usage of this.
12#
13
14EXTRAOPKGCONFIG ?= ""
15ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
16
17do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
18do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
19do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
20do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
21
22do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
23do_populate_sdk[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
24do_populate_sdk_ext[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
25
26OPKG_PREPROCESS_COMMANDS = ""
27
28OPKG_POSTPROCESS_COMMANDS = ""
29
30OPKGLIBDIR ??= "${localstatedir}/lib"
31
32MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
33
34python () {
35
36 if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
37 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
38 flags = flags.replace("do_package_write_ipk", "")
39 flags = flags.replace("do_deploy", "")
40 flags = flags.replace("do_populate_sysroot", "")
41 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
42 d.setVar('OPKG_PREPROCESS_COMMANDS', "")
43 d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
44}
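
The anonymous python above drops the package-writing task dependencies when images are built purely from existing package feeds. A hedged configuration sketch (the feed URI is a placeholder):

BUILD_IMAGES_FROM_FEEDS = "1"
PACKAGE_FEED_URIS = "http://example.com/ipk"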
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
deleted file mode 100644
index 6eccd5a959..0000000000
--- a/meta/classes/rootfs_rpm.bbclass
+++ /dev/null
@@ -1,45 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Creates a root filesystem out of rpm packages
9#
10
11ROOTFS_PKGMANAGE = "rpm dnf"
12
13# dnf is using our custom sysconfig module, and so will fail without these
14export STAGING_INCDIR
15export STAGING_LIBDIR
16
17# Add 100Meg of extra space for dnf
18IMAGE_ROOTFS_EXTRA_SPACE:append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
19
20# Dnf is python based, so be sure python3-native is available to us.
21EXTRANATIVEPATH += "python3-native"
22
23# opkg is needed for update-alternatives
24RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
25 dnf-native:do_populate_sysroot \
26 createrepo-c-native:do_populate_sysroot \
27 opkg-native:do_populate_sysroot"
28
29do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
30do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
31
32do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
33do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
34
35python () {
36 if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
37 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
38 flags = flags.replace("do_package_write_rpm", "")
39 flags = flags.replace("do_deploy", "")
40 flags = flags.replace("do_populate_sysroot", "")
41 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
42 d.setVar('RPM_PREPROCESS_COMMANDS', '')
43 d.setVar('RPM_POSTPROCESS_COMMANDS', '')
44
45}
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
deleted file mode 100644
index cbcf876479..0000000000
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ /dev/null
@@ -1,47 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class installs additional files found on the build host
8# directly into the rootfs.
9#
10# One use case is to install a constant ssh host key in
11# an image that gets created for just one machine. This
12# solves two issues:
13# - host key generation on the device can stall when the
14# kernel has not gathered enough entropy yet (seen in practice
15# under qemu)
16# - ssh complains by default when the host key changes
17#
18# For dropbear, with the ssh host key store along side the local.conf:
19# 1. Extend local.conf:
20# INHERIT += "rootfsdebugfiles"
21# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;"
22# 2. Boot the image once, copy the dropbear_rsa_host_key from
23# the device into your build conf directory.
24# 3. An optional third parameter can be used to set the file mode
25# of the copied target, for instance:
26# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key 0600;"
27# in case the files are required to have a specific mode (which shouldn't be too open, for example).
28#
29# Do not use for production images! It bypasses several
30# core build mechanisms (updating the image when one
31# of the files changes, license tracking in the image
32# manifest, ...).
33
34ROOTFS_DEBUG_FILES ?= ""
35ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
36
37ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
38rootfs_debug_files () {
39 #!/bin/sh -e
40 echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
41 if [ -e "$source" ]; then
42 mkdir -p $(dirname $target)
43 cp -a $source $target
44 [ -n "$mode" ] && chmod $mode $target
45 fi
46 done
47}
diff --git a/meta/classes/rust-bin.bbclass b/meta/classes/rust-bin.bbclass
deleted file mode 100644
index b8e7ef8191..0000000000
--- a/meta/classes/rust-bin.bbclass
+++ /dev/null
@@ -1,154 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit rust
8
9RDEPENDS:${PN}:append:class-target = " ${RUSTLIB_DEP}"
10
11RUSTC_ARCHFLAGS += "-C opt-level=3 -g -L ${STAGING_DIR_HOST}/${rustlibdir} -C linker=${RUST_TARGET_CCLD}"
12EXTRA_OEMAKE += 'RUSTC_ARCHFLAGS="${RUSTC_ARCHFLAGS}"'
13
14# Some libraries overlap with the standard library, but libstd is configured to
15# make it difficult or impossible to use its version. Unfortunately libstd
16# must be explicitly overridden using extern.
17OVERLAP_LIBS = "\
18 libc \
19 log \
20 getopts \
21 rand \
22"
23def get_overlap_deps(d):
24 deps = d.getVar("DEPENDS").split()
25 overlap_deps = []
26 for o in d.getVar("OVERLAP_LIBS").split():
27 l = len([o for dep in deps if (o + '-rs' in dep)])
28 if l > 0:
29 overlap_deps.append(o)
30 return " ".join(overlap_deps)
31OVERLAP_DEPS = "${@get_overlap_deps(d)}"
32
33# Prevents multiple static copies of standard library modules
34# See https://github.com/rust-lang/rust/issues/19680
35RUSTC_PREFER_DYNAMIC = "-C prefer-dynamic"
36RUSTC_FLAGS += "${RUSTC_PREFER_DYNAMIC}"
37
38CRATE_NAME ?= "${@d.getVar('BPN').replace('-rs', '').replace('-', '_')}"
39BINNAME ?= "${BPN}"
40LIBNAME ?= "lib${CRATE_NAME}-rs"
41CRATE_TYPE ?= "dylib"
42BIN_SRC ?= "${S}/src/main.rs"
43LIB_SRC ?= "${S}/src/lib.rs"
44
45rustbindest ?= "${bindir}"
46rustlibdest ?= "${rustlibdir}"
47RUST_RPATH_ABS ?= "${rustlibdir}:${rustlib}"
48
49def relative_rpaths(paths, base):
50 relpaths = set()
51 for p in paths.split(':'):
52 if p == base:
53 relpaths.add('$ORIGIN')
54 continue
55 relpaths.add(os.path.join('$ORIGIN', os.path.relpath(p, base)))
56 return '-rpath=' + ':'.join(relpaths) if len(relpaths) else ''
57
58RUST_LIB_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustlibdest', True))}"
59RUST_BIN_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustbindest', True))}"
60
61def libfilename(d):
62 if d.getVar('CRATE_TYPE', True) == 'dylib':
63 return d.getVar('LIBNAME', True) + '.so'
64 else:
65 return d.getVar('LIBNAME', True) + '.rlib'
66
67def link_args(d, bin):
68 linkargs = []
69 if bin:
70 rpaths = d.getVar('RUST_BIN_RPATH_FLAGS', False)
71 else:
72 rpaths = d.getVar('RUST_LIB_RPATH_FLAGS', False)
73 if d.getVar('CRATE_TYPE', True) == 'dylib':
74 linkargs.append('-soname')
75 linkargs.append(libfilename(d))
76 if len(rpaths):
77 linkargs.append(rpaths)
78 if len(linkargs):
79 return ' '.join(['-Wl,' + arg for arg in linkargs])
80 else:
81 return ''
82
83get_overlap_externs () {
84 externs=
85 for dep in ${OVERLAP_DEPS}; do
86 extern=$(ls ${STAGING_DIR_HOST}/${rustlibdir}/lib$dep-rs.{so,rlib} 2>/dev/null \
87 | awk '{print $1}');
88 if [ -n "$extern" ]; then
89 externs="$externs --extern $dep=$extern"
90 else
91 echo "$dep in depends but no such library found in ${rustlibdir}!" >&2
92 exit 1
93 fi
94 done
95 echo "$externs"
96}
97
98do_configure () {
99}
100
101oe_runrustc () {
102 bbnote ${RUSTC} ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
103 "${RUSTC}" ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
104}
105
106oe_compile_rust_lib () {
107 rm -rf ${LIBNAME}.{rlib,so}
108 local -a link_args
109 if [ -n '${@link_args(d, False)}' ]; then
110 link_args[0]='-C'
111 link_args[1]='link-args=${@link_args(d, False)}'
112 fi
113 oe_runrustc $(get_overlap_externs) \
114 "${link_args[@]}" \
115 ${LIB_SRC} \
116 -o ${@libfilename(d)} \
117 --crate-name=${CRATE_NAME} --crate-type=${CRATE_TYPE} \
118 "$@"
119}
120oe_compile_rust_lib[vardeps] += "get_overlap_externs"
121
122oe_compile_rust_bin () {
123 rm -rf ${BINNAME}
124 local -a link_args
125 if [ -n '${@link_args(d, True)}' ]; then
126 link_args[0]='-C'
127 link_args[1]='link-args=${@link_args(d, True)}'
128 fi
129 oe_runrustc $(get_overlap_externs) \
130 "${link_args[@]}" \
131 ${BIN_SRC} -o ${BINNAME} "$@"
132}
133oe_compile_rust_bin[vardeps] += "get_overlap_externs"
134
135oe_install_rust_lib () {
136 for lib in $(ls ${LIBNAME}.{so,rlib} 2>/dev/null); do
137 echo Installing $lib
138 install -D -m 755 $lib ${D}/${rustlibdest}/$lib
139 done
140}
141
142oe_install_rust_bin () {
143 echo Installing ${BINNAME}
144 install -D -m 755 ${BINNAME} ${D}/${rustbindest}/${BINNAME}
145}
146
147do_rust_bin_fixups() {
148 for f in `find ${PKGD} -name '*.so*'`; do
149 echo "Strip rust note: $f"
150 ${OBJCOPY} -R .note.rustc $f $f
151 done
152}
153PACKAGE_PREPROCESS_FUNCS += "do_rust_bin_fixups"
154
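For context, a hypothetical minimal recipe using this class might look as follows (the structure is an assumption based on the helpers defined above, not a recipe from this diff):

inherit rust-bin

do_compile () {
    oe_compile_rust_bin
}

do_install () {
    oe_install_rust_bin
}

With the defaults above, this compiles ${BIN_SRC} (${S}/src/main.rs) into ${BINNAME} and installs it into ${rustbindest}.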
diff --git a/meta/classes/rust-common.bbclass b/meta/classes/rust-common.bbclass
deleted file mode 100644
index 93bf6c8be6..0000000000
--- a/meta/classes/rust-common.bbclass
+++ /dev/null
@@ -1,177 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native
8inherit rust-target-config
9
10# Common variables used by all Rust builds
11export rustlibdir = "${libdir}/rustlib/${RUST_HOST_SYS}/lib"
12FILES:${PN} += "${rustlibdir}/*.so"
13FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta"
14FILES:${PN}-dbg += "${rustlibdir}/.debug"
15
16RUSTLIB = "-L ${STAGING_DIR_HOST}${rustlibdir}"
17RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}"
18RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}"
19RUSTLIB_DEP ?= "libstd-rs"
20RUST_PANIC_STRATEGY ?= "unwind"
21
22def target_is_armv7(d):
23 '''Determine if target is armv7'''
24 # TUNE_FEATURES may include arm* even if the target is not arm
25 # in the case of *-native packages
26 if d.getVar('TARGET_ARCH') != 'arm':
27 return False
28
29 feat = d.getVar('TUNE_FEATURES')
30 feat = frozenset(feat.split())
31 mach_overrides = d.getVar('MACHINEOVERRIDES')
32 mach_overrides = frozenset(mach_overrides.split(':'))
33
34 v7=frozenset(['armv7a', 'armv7r', 'armv7m', 'armv7ve'])
35 if mach_overrides.isdisjoint(v7) and feat.isdisjoint(v7):
36 return False
37 else:
38 return True
39target_is_armv7[vardepvalue] = "${@target_is_armv7(d)}"
40
41# Responsible for taking Yocto triples and converting them to Rust triples
42def rust_base_triple(d, thing):
43 '''
44 Mangle bitbake's *_SYS into something that rust might support (see
45 rust/mk/cfg/* for a list)
46
47 Note that os is assumed to be some linux form
48 '''
49
50 # The llvm-target for armv7 is armv7-unknown-linux-gnueabihf
51 if d.getVar('{}_ARCH'.format(thing)) == d.getVar('TARGET_ARCH') and target_is_armv7(d):
52 arch = "armv7"
53 else:
54 arch = oe.rust.arch_to_rust_arch(d.getVar('{}_ARCH'.format(thing)))
55
56 # When bootstrapping rust-native, BUILD must match the triple used by the upstream snapshot tarballs
57 bpn = d.getVar('BPN')
58 if thing == "BUILD" and bpn in ["rust"]:
59 return arch + "-unknown-linux-gnu"
60
61 vendor = d.getVar('{}_VENDOR'.format(thing))
62
63 # Default to glibc
64 libc = "-gnu"
65 os = d.getVar('{}_OS'.format(thing))
66 # This catches ARM targets and appends the necessary hard float bits
67 if os == "linux-gnueabi" or os == "linux-musleabi":
68 libc = bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hf', '', d)
69 elif "musl" in os:
70 libc = "-musl"
71 os = "linux"
72
73 return arch + vendor + '-' + os + libc
74
75
76# In some cases uname and the toolchain differ on their idea of the arch name
77RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}"
78
79# Naming explanation
80# Yocto
81# - BUILD_SYS - Yocto triple of the build environment
82# - HOST_SYS - What we're building for in Yocto
83# - TARGET_SYS - What we're building for in Yocto
84#
85# So when building '-native' packages BUILD_SYS == HOST_SYS == TARGET_SYS
86# When building packages for the image HOST_SYS == TARGET_SYS
87# This is a gross oversimplification as there are other modes but
88# currently this is all that's supported.
89#
90# Rust
91# - TARGET - the system where the binary will run
92# - HOST - the system where the binary is being built
93#
94# Rust additionally uses two further cases:
95# - undecorated (e.g. CC) - equivalent to TARGET
96# - triple suffix (e.g. CC:x86_64_unknown_linux_gnu) - both
97# see: https://github.com/alexcrichton/gcc-rs
98# Given the way that Rust's internal triples and Yocto triples are mapped together,
99# it's likely best not to use the triple suffix due to potential confusion.
100
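# For illustration (a typical mapping assumed from the logic above, not a
# value recorded in this diff): a Yocto TARGET_SYS of "arm-poky-linux-gnueabi"
# with a hard-float armv7 tune becomes the Rust triple
# "armv7-poky-linux-gnueabihf" via rust_base_triple(), while bootstrapping
# rust itself forces the BUILD triple to "<arch>-unknown-linux-gnu".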
101RUST_BUILD_SYS = "${@rust_base_triple(d, 'BUILD')}"
102RUST_BUILD_SYS[vardepvalue] = "${RUST_BUILD_SYS}"
103RUST_HOST_SYS = "${@rust_base_triple(d, 'HOST')}"
104RUST_HOST_SYS[vardepvalue] = "${RUST_HOST_SYS}"
105RUST_TARGET_SYS = "${@rust_base_triple(d, 'TARGET')}"
106RUST_TARGET_SYS[vardepvalue] = "${RUST_TARGET_SYS}"
107
108# Wrappers to get around the fact that Rust needs a single
109# binary while Yocto's compiler and linker commands have
110# arguments. Technically the archiver is always one command, but
111# this is necessary for builds that determine the prefix and then
112# use those commands based on the prefix.
113WRAPPER_DIR = "${WORKDIR}/wrapper"
114RUST_BUILD_CC = "${WRAPPER_DIR}/build-rust-cc"
115RUST_BUILD_CXX = "${WRAPPER_DIR}/build-rust-cxx"
116RUST_BUILD_CCLD = "${WRAPPER_DIR}/build-rust-ccld"
117RUST_BUILD_AR = "${WRAPPER_DIR}/build-rust-ar"
118RUST_TARGET_CC = "${WRAPPER_DIR}/target-rust-cc"
119RUST_TARGET_CXX = "${WRAPPER_DIR}/target-rust-cxx"
120RUST_TARGET_CCLD = "${WRAPPER_DIR}/target-rust-ccld"
121RUST_TARGET_AR = "${WRAPPER_DIR}/target-rust-ar"
122
123create_wrapper_rust () {
124 file="$1"
125 shift
126 extras="$1"
127 shift
128
129 cat <<- EOF > "${file}"
130 #!/usr/bin/env python3
131 import os, sys
132 orig_binary = "$@"
133 extras = "${extras}"
134 binary = orig_binary.split()[0]
135 args = orig_binary.split() + sys.argv[1:]
136 if extras:
137 args.append(extras)
138 os.execvp(binary, args)
139 EOF
140 chmod +x "${file}"
141}
142
143WRAPPER_TARGET_CC = "${CC}"
144WRAPPER_TARGET_CXX = "${CXX}"
145WRAPPER_TARGET_CCLD = "${CCLD}"
146WRAPPER_TARGET_LDFLAGS = "${LDFLAGS}"
147WRAPPER_TARGET_EXTRALD = ""
148WRAPPER_TARGET_AR = "${AR}"
149
150# compiler is used by gcc-rs
151# linker is used by rustc/cargo
152# archiver is used by the build of libstd-rs
153do_rust_create_wrappers () {
154 mkdir -p "${WRAPPER_DIR}"
155
156 # Yocto Build / Rust Host C compiler
157 create_wrapper_rust "${RUST_BUILD_CC}" "" "${BUILD_CC}"
158 # Yocto Build / Rust Host C++ compiler
159 create_wrapper_rust "${RUST_BUILD_CXX}" "" "${BUILD_CXX}"
160 # Yocto Build / Rust Host linker
161 create_wrapper_rust "${RUST_BUILD_CCLD}" "" "${BUILD_CCLD}" "${BUILD_LDFLAGS}"
162 # Yocto Build / Rust Host archiver
163 create_wrapper_rust "${RUST_BUILD_AR}" "" "${BUILD_AR}"
164
165 # Yocto Target / Rust Target C compiler
166 create_wrapper_rust "${RUST_TARGET_CC}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}"
167 # Yocto Target / Rust Target C++ compiler
168 create_wrapper_rust "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CXX}" "${CXXFLAGS}"
169 # Yocto Target / Rust Target linker
170 create_wrapper_rust "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}"
171 # Yocto Target / Rust Target archiver
172 create_wrapper_rust "${RUST_TARGET_AR}" "" "${WRAPPER_TARGET_AR}"
173
174}
175
176addtask rust_create_wrappers before do_configure after do_patch do_prepare_recipe_sysroot
177do_rust_create_wrappers[dirs] += "${WRAPPER_DIR}"
diff --git a/meta/classes/rust-target-config.bbclass b/meta/classes/rust-target-config.bbclass
deleted file mode 100644
index 3405086402..0000000000
--- a/meta/classes/rust-target-config.bbclass
+++ /dev/null
@@ -1,391 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Right now this is focused on arm-specific tune features.
8# We get away with this for now as one can only use x86-64 as the build host
9# (not arm).
10# Note that TUNE_FEATURES is _always_ referring to the target, so we really
11# don't want to use this for the host/build.
12def llvm_features_from_tune(d):
13 f = []
14 feat = d.getVar('TUNE_FEATURES')
15 if not feat:
16 return []
17 feat = frozenset(feat.split())
18
19 mach_overrides = d.getVar('MACHINEOVERRIDES')
20 mach_overrides = frozenset(mach_overrides.split(':'))
21
22 if 'vfpv4' in feat:
23 f.append("+vfp4")
24 if 'vfpv3' in feat:
25 f.append("+vfp3")
26 if 'vfpv3d16' in feat:
27 f.append("+d16")
28
29 if 'vfpv2' in feat or 'vfp' in feat:
30 f.append("+vfp2")
31
32 if 'neon' in feat:
33 f.append("+neon")
34
35 if 'mips32' in feat:
36 f.append("+mips32")
37
38 if 'mips32r2' in feat:
39 f.append("+mips32r2")
40
41 if target_is_armv7(d):
42 f.append('+v7')
43
44 if ('armv6' in mach_overrides) or ('armv6' in feat):
45 f.append("+v6")
46 if 'armv5te' in feat:
47 f.append("+strict-align")
48 f.append("+v5te")
49 elif 'armv5' in feat:
50 f.append("+strict-align")
51 f.append("+v5")
52
53 if ('armv4' in mach_overrides) or ('armv4' in feat):
54 f.append("+strict-align")
55
56 if 'dsp' in feat:
57 f.append("+dsp")
58
59 if 'thumb' in feat:
60 if d.getVar('ARM_THUMB_OPT') == "thumb":
61 if target_is_armv7(d):
62 f.append('+thumb2')
63 f.append("+thumb-mode")
64
65 if 'cortexa5' in feat:
66 f.append("+a5")
67 if 'cortexa7' in feat:
68 f.append("+a7")
69 if 'cortexa9' in feat:
70 f.append("+a9")
71 if 'cortexa15' in feat:
72 f.append("+a15")
73 if 'cortexa17' in feat:
74 f.append("+a17")
75 if ('riscv64' in feat) or ('riscv32' in feat):
76 f.append("+a,+c,+d,+f,+m")
77 return f
78llvm_features_from_tune[vardepvalue] = "${@llvm_features_from_tune(d)}"
79
80# TARGET_CC_ARCH changes from build/cross/target so it'll do the right thing
81# this should go away when https://github.com/rust-lang/rust/pull/31709 is
82# stable (1.9.0?)
83def llvm_features_from_cc_arch(d):
84 f = []
85 feat = d.getVar('TARGET_CC_ARCH')
86 if not feat:
87 return []
88 feat = frozenset(feat.split())
89
90 if '-mmmx' in feat:
91 f.append("+mmx")
92 if '-msse' in feat:
93 f.append("+sse")
94 if '-msse2' in feat:
95 f.append("+sse2")
96 if '-msse3' in feat:
97 f.append("+sse3")
98 if '-mssse3' in feat:
99 f.append("+ssse3")
100 if '-msse4.1' in feat:
101 f.append("+sse4.1")
102 if '-msse4.2' in feat:
103 f.append("+sse4.2")
104 if '-msse4a' in feat:
105 f.append("+sse4a")
106 if '-mavx' in feat:
107 f.append("+avx")
108 if '-mavx2' in feat:
109 f.append("+avx2")
110
111 return f
112
113def llvm_features_from_target_fpu(d):
114 # TARGET_FPU can be hard or soft. +soft-float tells llvm to use the soft float
115 # ABI. There is no option for hard.
116
117 fpu = d.getVar('TARGET_FPU', True)
118 return ["+soft-float"] if fpu == "soft" else []
119
120def llvm_features(d):
121 return ','.join(llvm_features_from_tune(d) +
122 llvm_features_from_cc_arch(d) +
123 llvm_features_from_target_fpu(d))
124
125llvm_features[vardepvalue] = "${@llvm_features(d)}"
126
127## arm-unknown-linux-gnueabihf
128DATA_LAYOUT[arm-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
129TARGET_ENDIAN[arm-eabi] = "little"
130TARGET_POINTER_WIDTH[arm-eabi] = "32"
131TARGET_C_INT_WIDTH[arm-eabi] = "32"
132MAX_ATOMIC_WIDTH[arm-eabi] = "64"
133FEATURES[arm-eabi] = "+v6,+vfp2"
134
135## armv7-unknown-linux-gnueabihf
136DATA_LAYOUT[armv7-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
137TARGET_ENDIAN[armv7-eabi] = "little"
138TARGET_POINTER_WIDTH[armv7-eabi] = "32"
139TARGET_C_INT_WIDTH[armv7-eabi] = "32"
140MAX_ATOMIC_WIDTH[armv7-eabi] = "64"
141FEATURES[armv7-eabi] = "+v7,+vfp2,+thumb2"
142
143## aarch64-unknown-linux-{gnu, musl}
144DATA_LAYOUT[aarch64] = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
145TARGET_ENDIAN[aarch64] = "little"
146TARGET_POINTER_WIDTH[aarch64] = "64"
147TARGET_C_INT_WIDTH[aarch64] = "32"
148MAX_ATOMIC_WIDTH[aarch64] = "128"
149
150## x86_64-unknown-linux-{gnu, musl}
151DATA_LAYOUT[x86_64] = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
152TARGET_ENDIAN[x86_64] = "little"
153TARGET_POINTER_WIDTH[x86_64] = "64"
154TARGET_C_INT_WIDTH[x86_64] = "32"
155MAX_ATOMIC_WIDTH[x86_64] = "64"
156
157## x86_64-unknown-linux-gnux32
158DATA_LAYOUT[x86_64-x32] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
159TARGET_ENDIAN[x86_64-x32] = "little"
160TARGET_POINTER_WIDTH[x86_64-x32] = "32"
161TARGET_C_INT_WIDTH[x86_64-x32] = "32"
162MAX_ATOMIC_WIDTH[x86_64-x32] = "64"
163
164## i686-unknown-linux-{gnu, musl}
165DATA_LAYOUT[i686] = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
166TARGET_ENDIAN[i686] = "little"
167TARGET_POINTER_WIDTH[i686] = "32"
168TARGET_C_INT_WIDTH[i686] = "32"
169MAX_ATOMIC_WIDTH[i686] = "64"
170
171## XXX: a bit of a hack so qemux86 builds, clone of i686-unknown-linux-{gnu, musl} above
172DATA_LAYOUT[i586] = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
173TARGET_ENDIAN[i586] = "little"
174TARGET_POINTER_WIDTH[i586] = "32"
175TARGET_C_INT_WIDTH[i586] = "32"
176MAX_ATOMIC_WIDTH[i586] = "64"
177
178## mips-unknown-linux-{gnu, musl}
179DATA_LAYOUT[mips] = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
180TARGET_ENDIAN[mips] = "big"
181TARGET_POINTER_WIDTH[mips] = "32"
182TARGET_C_INT_WIDTH[mips] = "32"
183MAX_ATOMIC_WIDTH[mips] = "32"
184
185## mipsel-unknown-linux-{gnu, musl}
186DATA_LAYOUT[mipsel] = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
187TARGET_ENDIAN[mipsel] = "little"
188TARGET_POINTER_WIDTH[mipsel] = "32"
189TARGET_C_INT_WIDTH[mipsel] = "32"
190MAX_ATOMIC_WIDTH[mipsel] = "32"
191
192## mips64-unknown-linux-{gnu, musl}
193DATA_LAYOUT[mips64] = "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
194TARGET_ENDIAN[mips64] = "big"
195TARGET_POINTER_WIDTH[mips64] = "64"
196TARGET_C_INT_WIDTH[mips64] = "64"
197MAX_ATOMIC_WIDTH[mips64] = "64"
198
199## mips64-n32-unknown-linux-{gnu, musl}
200DATA_LAYOUT[mips64-n32] = "E-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128"
201TARGET_ENDIAN[mips64-n32] = "big"
202TARGET_POINTER_WIDTH[mips64-n32] = "32"
203TARGET_C_INT_WIDTH[mips64-n32] = "32"
204MAX_ATOMIC_WIDTH[mips64-n32] = "64"
205
206## mips64el-unknown-linux-{gnu, musl}
207DATA_LAYOUT[mips64el] = "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
208TARGET_ENDIAN[mips64el] = "little"
209TARGET_POINTER_WIDTH[mips64el] = "64"
210TARGET_C_INT_WIDTH[mips64el] = "64"
211MAX_ATOMIC_WIDTH[mips64el] = "64"
212
213## powerpc-unknown-linux-{gnu, musl}
214DATA_LAYOUT[powerpc] = "E-m:e-p:32:32-i64:64-n32"
215TARGET_ENDIAN[powerpc] = "big"
216TARGET_POINTER_WIDTH[powerpc] = "32"
217TARGET_C_INT_WIDTH[powerpc] = "32"
218MAX_ATOMIC_WIDTH[powerpc] = "32"
219
220## powerpc64-unknown-linux-{gnu, musl}
221DATA_LAYOUT[powerpc64] = "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512"
222TARGET_ENDIAN[powerpc64] = "big"
223TARGET_POINTER_WIDTH[powerpc64] = "64"
224TARGET_C_INT_WIDTH[powerpc64] = "64"
225MAX_ATOMIC_WIDTH[powerpc64] = "64"
226
227## powerpc64le-unknown-linux-{gnu, musl}
228DATA_LAYOUT[powerpc64le] = "e-m:e-i64:64-n32:64-v256:256:256-v512:512:512"
229TARGET_ENDIAN[powerpc64le] = "little"
230TARGET_POINTER_WIDTH[powerpc64le] = "64"
231TARGET_C_INT_WIDTH[powerpc64le] = "64"
232MAX_ATOMIC_WIDTH[powerpc64le] = "64"
233
234## riscv32-unknown-linux-{gnu, musl}
235DATA_LAYOUT[riscv32] = "e-m:e-p:32:32-i64:64-n32-S128"
236TARGET_ENDIAN[riscv32] = "little"
237TARGET_POINTER_WIDTH[riscv32] = "32"
238TARGET_C_INT_WIDTH[riscv32] = "32"
239MAX_ATOMIC_WIDTH[riscv32] = "32"
240
241## riscv64-unknown-linux-{gnu, musl}
242DATA_LAYOUT[riscv64] = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
243TARGET_ENDIAN[riscv64] = "little"
244TARGET_POINTER_WIDTH[riscv64] = "64"
245TARGET_C_INT_WIDTH[riscv64] = "64"
246MAX_ATOMIC_WIDTH[riscv64] = "64"
247
248# Convert a normal arch (HOST_ARCH, TARGET_ARCH, BUILD_ARCH, etc) to something
249# rust's internals won't choke on.
250def arch_to_rust_target_arch(arch):
251 if arch == "i586" or arch == "i686":
252 return "x86"
253 elif arch == "mipsel":
254 return "mips"
255 elif arch == "mips64el":
256 return "mips64"
257 elif arch == "armv7":
258 return "arm"
259 elif arch == "powerpc64le":
260 return "powerpc64"
261 else:
262 return arch
263
264# generates our target CPU value
265def llvm_cpu(d):
266 cpu = d.getVar('PACKAGE_ARCH')
267 target = d.getVar('TRANSLATED_TARGET_ARCH')
268
269 trans = {}
270 trans['corei7-64'] = "corei7"
271 trans['core2-32'] = "core2"
272 trans['x86-64'] = "x86-64"
273 trans['i686'] = "i686"
274 trans['i586'] = "i586"
275 trans['powerpc'] = "powerpc"
276 trans['mips64'] = "mips64"
277 trans['mips64el'] = "mips64"
278 trans['riscv64'] = "generic-rv64"
279 trans['riscv32'] = "generic-rv32"
280
281 if target in ["mips", "mipsel"]:
282 feat = frozenset(d.getVar('TUNE_FEATURES').split())
283 if "mips32r2" in feat:
284 trans['mipsel'] = "mips32r2"
285 trans['mips'] = "mips32r2"
286 elif "mips32" in feat:
287 trans['mipsel'] = "mips32"
288 trans['mips'] = "mips32"
289
290 try:
291 return trans[cpu]
292 except KeyError:
293 return trans.get(target, "generic")
294
295llvm_cpu[vardepvalue] = "${@llvm_cpu(d)}"
296
297def rust_gen_target(d, thing, wd, arch):
298 import json
299
300 build_sys = d.getVar('BUILD_SYS')
301 target_sys = d.getVar('TARGET_SYS')
302
303 sys = d.getVar('{}_SYS'.format(thing))
304 prefix = d.getVar('{}_PREFIX'.format(thing))
305 rustsys = d.getVar('RUST_{}_SYS'.format(thing))
306
307 abi = None
308 cpu = "generic"
309 features = ""
310
311 # Need to apply the target tuning consistently, only if the triplet applies to the target
312 # and not in the native case
313 if sys == target_sys and sys != build_sys:
314 abi = d.getVar('ABIEXTENSION')
315 cpu = llvm_cpu(d)
316 if bb.data.inherits_class('native', d):
317 features = ','.join(llvm_features_from_cc_arch(d))
318 else:
319 features = llvm_features(d) or ""
320 # arm and armv7 have different targets in llvm
321 if arch == "arm" and target_is_armv7(d):
322 arch = 'armv7'
323
324 rust_arch = oe.rust.arch_to_rust_arch(arch)
325
326 if abi:
327 arch_abi = "{}-{}".format(rust_arch, abi)
328 else:
329 arch_abi = rust_arch
330
331 features = features or d.getVarFlag('FEATURES', arch_abi) or ""
332 features = features.strip()
333
334 # build tspec
335 tspec = {}
336 tspec['llvm-target'] = rustsys
337 tspec['data-layout'] = d.getVarFlag('DATA_LAYOUT', arch_abi)
338 if tspec['data-layout'] is None:
339 bb.fatal("No rust target defined for %s" % arch_abi)
340 tspec['max-atomic-width'] = int(d.getVarFlag('MAX_ATOMIC_WIDTH', arch_abi))
341 tspec['target-pointer-width'] = d.getVarFlag('TARGET_POINTER_WIDTH', arch_abi)
342 tspec['target-c-int-width'] = d.getVarFlag('TARGET_C_INT_WIDTH', arch_abi)
343 tspec['target-endian'] = d.getVarFlag('TARGET_ENDIAN', arch_abi)
344 tspec['arch'] = arch_to_rust_target_arch(rust_arch)
345 tspec['os'] = "linux"
346 if "musl" in tspec['llvm-target']:
347 tspec['env'] = "musl"
348 else:
349 tspec['env'] = "gnu"
350 if "riscv64" in tspec['llvm-target']:
351 tspec['llvm-abiname'] = "lp64d"
352 if "riscv32" in tspec['llvm-target']:
353 tspec['llvm-abiname'] = "ilp32d"
354 tspec['vendor'] = "unknown"
355 tspec['target-family'] = "unix"
356 tspec['linker'] = "{}{}gcc".format(d.getVar('CCACHE'), prefix)
357 tspec['cpu'] = cpu
358 if features != "":
359 tspec['features'] = features
360 tspec['dynamic-linking'] = True
361 tspec['executables'] = True
362 tspec['linker-is-gnu'] = True
363 tspec['linker-flavor'] = "gcc"
364 tspec['has-rpath'] = True
365 tspec['has-elf-tls'] = True
366 tspec['position-independent-executables'] = True
367 tspec['panic-strategy'] = d.getVar("RUST_PANIC_STRATEGY")
368
369 # write out the target spec json file
370 with open(wd + rustsys + '.json', 'w') as f:
371 json.dump(tspec, f, indent=4)
372
373# These are accounted for in tmpdir path names so don't need to be in the task sig
374rust_gen_target[vardepsexclude] += "ABIEXTENSION llvm_cpu"
375
376do_rust_gen_targets[vardeps] += "DATA_LAYOUT TARGET_ENDIAN TARGET_POINTER_WIDTH TARGET_C_INT_WIDTH MAX_ATOMIC_WIDTH FEATURES"
377
378RUST_TARGETS_DIR = "${WORKDIR}/rust-targets/"
379export RUST_TARGET_PATH = "${RUST_TARGETS_DIR}"
380
381python do_rust_gen_targets () {
382 wd = d.getVar('RUST_TARGETS_DIR')
383 # Order of BUILD, HOST, TARGET is important in case the files overwrite each other; most specific last
384 rust_gen_target(d, 'BUILD', wd, d.getVar('BUILD_ARCH'))
385 rust_gen_target(d, 'HOST', wd, d.getVar('HOST_ARCH'))
386 rust_gen_target(d, 'TARGET', wd, d.getVar('TARGET_ARCH'))
387}
388
389addtask rust_gen_targets after do_patch before do_compile
390do_rust_gen_targets[dirs] += "${RUST_TARGETS_DIR}"
391
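To make the output concrete, here is a hedged sketch of the JSON target spec that rust_gen_target() would write for an aarch64 target (the llvm-target value is a placeholder; the flag-derived fields follow the aarch64 definitions above, and the exact file depends on the configured tune):

{
    "llvm-target": "aarch64-poky-linux-gnu",
    "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
    "max-atomic-width": 128,
    "target-pointer-width": "64",
    "target-c-int-width": "32",
    "target-endian": "little",
    "arch": "aarch64",
    "os": "linux",
    "env": "gnu",
    "vendor": "unknown",
    "panic-strategy": "unwind"
}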
diff --git a/meta/classes/rust.bbclass b/meta/classes/rust.bbclass
deleted file mode 100644
index dae25cac2a..0000000000
--- a/meta/classes/rust.bbclass
+++ /dev/null
@@ -1,51 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit rust-common
8
9RUSTC = "rustc"
10
11RUSTC_ARCHFLAGS += "--target=${RUST_HOST_SYS} ${RUSTFLAGS}"
12
13def rust_base_dep(d):
14 # Taken from meta/classes/base.bbclass `base_dep_prepend` and modified to
15 # use rust instead of gcc
16 deps = ""
17 if not d.getVar('INHIBIT_DEFAULT_RUST_DEPS'):
18 if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
19 deps += " rust-native ${RUSTLIB_DEP}"
20 else:
21 deps += " rust-native"
22 return deps
23
24DEPENDS:append = " ${@rust_base_dep(d)}"
25
26# BUILD_LDFLAGS
27# ${STAGING_LIBDIR_NATIVE}
28# ${STAGING_BASE_LIBDIR_NATIVE}
29# BUILDSDK_LDFLAGS
30# ${STAGING_LIBDIR}
31# #{STAGING_DIR_HOST}
32# TARGET_LDFLAGS ?????
33#RUSTC_BUILD_LDFLAGS = "\
34# --sysroot ${STAGING_DIR_NATIVE} \
35# -L${STAGING_LIBDIR_NATIVE} \
36# -L${STAGING_BASE_LIBDIR_NATIVE} \
37#"
38
39# XXX: for some reason bitbake sets BUILD_* & TARGET_* but uses the bare
40# variables for HOST. Alias things to make it easier for us.
41HOST_LDFLAGS ?= "${LDFLAGS}"
42HOST_CFLAGS ?= "${CFLAGS}"
43HOST_CXXFLAGS ?= "${CXXFLAGS}"
44HOST_CPPFLAGS ?= "${CPPFLAGS}"
45
46rustlib_suffix="${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${RUST_HOST_SYS}/lib"
47# Native sysroot standard library path
48rustlib_src="${prefix}/lib/${rustlib_suffix}"
49# Host sysroot standard library path
50rustlib="${libdir}/${rustlib_suffix}"
51rustlib:class-native="${libdir}/rustlib/${BUILD_SYS}/lib"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
deleted file mode 100644
index 4104694478..0000000000
--- a/meta/classes/sanity.bbclass
+++ /dev/null
@@ -1,1028 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Sanity check the users setup for common misconfigurations
9#
10
11SANITY_REQUIRED_UTILITIES ?= "patch diffstat git bzip2 tar \
12 gzip gawk chrpath wget cpio perl file which"
13
14def bblayers_conf_file(d):
15 return os.path.join(d.getVar('TOPDIR'), 'conf/bblayers.conf')
16
17def sanity_conf_read(fn):
18 with open(fn, 'r') as f:
19 lines = f.readlines()
20 return lines
21
22def sanity_conf_find_line(pattern, lines):
23 import re
24 return next(((index, line)
25 for index, line in enumerate(lines)
26 if re.search(pattern, line)), (None, None))
27
28def sanity_conf_update(fn, lines, version_var_name, new_version):
29 index, line = sanity_conf_find_line(r"^%s" % version_var_name, lines)
30 lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
31 with open(fn, "w") as f:
32 f.write(''.join(lines))
33
34# Functions added to this variable MUST throw a NotImplementedError exception unless
35# they successfully changed the config version in the config file. Exceptions
36# are used since exec_func doesn't handle return values.
37BBLAYERS_CONF_UPDATE_FUNCS += " \
38 conf/bblayers.conf:LCONF_VERSION:LAYER_CONF_VERSION:oecore_update_bblayers \
39 conf/local.conf:CONF_VERSION:LOCALCONF_VERSION:oecore_update_localconf \
40 conf/site.conf:SCONF_VERSION:SITE_CONF_VERSION:oecore_update_siteconf \
41"
42
43SANITY_DIFF_TOOL ?= "meld"
44
45SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
46python oecore_update_localconf() {
47 # Check we are using a valid local.conf
48 current_conf = d.getVar('CONF_VERSION')
49 conf_version = d.getVar('LOCALCONF_VERSION')
50
51 failmsg = """Your version of local.conf was generated from an older/newer version of
52local.conf.sample and there have been updates made to this file. Please compare the two
53files and merge any changes before continuing.
54
55Matching the version numbers will remove this message.
56
57\"${SANITY_DIFF_TOOL} conf/local.conf ${SANITY_LOCALCONF_SAMPLE}\"
58
59is a good way to visualise the changes."""
60 failmsg = d.expand(failmsg)
61
62 raise NotImplementedError(failmsg)
63}
64
65SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
66python oecore_update_siteconf() {
67 # If we have a site.conf, check it's valid
68 current_sconf = d.getVar('SCONF_VERSION')
69 sconf_version = d.getVar('SITE_CONF_VERSION')
70
71 failmsg = """Your version of site.conf was generated from an older version of
72site.conf.sample and there have been updates made to this file. Please compare the two
73files and merge any changes before continuing.
74
75Matching the version numbers will remove this message.
76
77\"${SANITY_DIFF_TOOL} conf/site.conf ${SANITY_SITECONF_SAMPLE}\"
78
79is a good way to visualise the changes."""
80 failmsg = d.expand(failmsg)
81
82 raise NotImplementedError(failmsg)
83}
84
85SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
86python oecore_update_bblayers() {
87 # bblayers.conf is out of date, so see if we can resolve that
88
89 current_lconf = int(d.getVar('LCONF_VERSION'))
90 lconf_version = int(d.getVar('LAYER_CONF_VERSION'))
91
92 failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
93Please compare your file against bblayers.conf.sample and merge any changes before continuing.
94"${SANITY_DIFF_TOOL} conf/bblayers.conf ${SANITY_BBLAYERCONF_SAMPLE}"
95
96is a good way to visualise the changes."""
97 failmsg = d.expand(failmsg)
98
99 if not current_lconf:
100 raise NotImplementedError(failmsg)
101
102 lines = []
103
104 if current_lconf < 4:
105 raise NotImplementedError(failmsg)
106
107 bblayers_fn = bblayers_conf_file(d)
108 lines = sanity_conf_read(bblayers_fn)
109
110 if current_lconf == 4 and lconf_version > 4:
111 topdir_var = '$' + '{TOPDIR}'
112 index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
113 if bbpath_line:
114 start = bbpath_line.find('"')
115 if start != -1 and (len(bbpath_line) != (start + 1)):
116 if bbpath_line[start + 1] == '"':
117 lines[index] = (bbpath_line[:start + 1] +
118 topdir_var + bbpath_line[start + 1:])
119 else:
120 if topdir_var not in bbpath_line:
121 lines[index] = (bbpath_line[:start + 1] +
122 topdir_var + ':' + bbpath_line[start + 1:])
123 else:
124 raise NotImplementedError(failmsg)
125 else:
126 index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
127 if bbfiles_line:
128 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
129 else:
130 raise NotImplementedError(failmsg)
131
132 current_lconf += 1
133 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
134 bb.note("Your conf/bblayers.conf has been automatically updated.")
135 return
136
137 elif current_lconf == 5 and lconf_version > 5:
138 # Null update, to avoid issues with people switching between poky and other distros
139 current_lconf = 6
140 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
141 bb.note("Your conf/bblayers.conf has been automatically updated.")
142 return
143
146 elif current_lconf == 6 and lconf_version > 6:
147 # Handle rename of meta-yocto -> meta-poky
148 # This marks the start of separate version numbers but code is needed in OE-Core
149 # for the migration, one last time.
150 layers = d.getVar('BBLAYERS').split()
151 layers = [ os.path.basename(path) for path in layers ]
152 if 'meta-yocto' in layers:
153 found = False
154 while True:
155 index, meta_yocto_line = sanity_conf_find_line(r'.*meta-yocto[\'"\s\n]', lines)
156 if meta_yocto_line:
157 lines[index] = meta_yocto_line.replace('meta-yocto', 'meta-poky')
158 found = True
159 else:
160 break
161 if not found:
162 raise NotImplementedError(failmsg)
163 index, lconf_line = sanity_conf_find_line(r'LCONF_VERSION.*\n', lines)
164 if lconf_line:
165 lines[index] = 'POKY_BBLAYERS_CONF_VERSION = "1"\n'
166 else:
167 raise NotImplementedError(failmsg)
168 with open(bblayers_fn, "w") as f:
169 f.write(''.join(lines))
170 bb.note("Your conf/bblayers.conf has been automatically updated.")
171 return
172 current_lconf += 1
173 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
174 bb.note("Your conf/bblayers.conf has been automatically updated.")
175 return
176
177 raise NotImplementedError(failmsg)
178}
179
180def raise_sanity_error(msg, d, network_error=False):
181 if d.getVar("SANITY_USE_EVENTS") == "1":
182 try:
183 bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
184 except TypeError:
185 bb.event.fire(bb.event.SanityCheckFailed(msg), d)
186 return
187
188 bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
189 Either fix the cause of this error or, at your own risk, disable the checker (see sanity.conf).
190 Following is the list of potential problems / advisories:
191
192 %s""" % msg)
193
194# Check a single tune for validity.
195def check_toolchain_tune(data, tune, multilib):
196 tune_errors = []
197 if not tune:
198 return "No tuning found for %s multilib." % multilib
199 localdata = bb.data.createCopy(data)
200 if multilib != "default":
201 # Apply the overrides so we can look at the details.
202 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
203 localdata.setVar("OVERRIDES", overrides)
204 bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
205 features = (localdata.getVar("TUNE_FEATURES:tune-%s" % tune) or "").split()
206 if not features:
207 return "Tuning '%s' has no defined features, and cannot be used." % tune
208 valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
209 conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
210 # [doc] is the documentation for the variable, not a real feature
211 if 'doc' in valid_tunes:
212 del valid_tunes['doc']
213 if 'doc' in conflicts:
214 del conflicts['doc']
215 for feature in features:
216 if feature in conflicts:
217 for conflict in conflicts[feature].split():
218 if conflict in features:
219 tune_errors.append("Feature '%s' conflicts with '%s'." %
220 (feature, conflict))
221 if feature in valid_tunes:
222 bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
223 else:
224 tune_errors.append("Feature '%s' is not defined." % feature)
225 if tune_errors:
226 return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
227
228def check_toolchain(data):
229 tune_error_set = []
230 deftune = data.getVar("DEFAULTTUNE")
231 tune_errors = check_toolchain_tune(data, deftune, 'default')
232 if tune_errors:
233 tune_error_set.append(tune_errors)
234
235 multilibs = (data.getVar("MULTILIB_VARIANTS") or "").split()
236 global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS") or "").split()
237
238 if multilibs:
239 seen_libs = []
240 seen_tunes = []
241 for lib in multilibs:
242 if lib in seen_libs:
243 tune_error_set.append("The multilib '%s' appears more than once." % lib)
244 else:
245 seen_libs.append(lib)
246 if not lib in global_multilibs:
247 tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
248 tune = data.getVar("DEFAULTTUNE:virtclass-multilib-%s" % lib)
249 if tune in seen_tunes:
250 tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
251 else:
252 seen_tunes.append(tune)
253 if tune == deftune:
254 tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
255 else:
256 tune_errors = check_toolchain_tune(data, tune, lib)
257 if tune_errors:
258 tune_error_set.append(tune_errors)
259 if tune_error_set:
260 return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
261
262 return ""
263
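For reference, the multilib variants that check_toolchain() walks come from configuration along these lines (a minimal sketch; the lib32 name and x86 tune are illustrative and machine-dependent):

# Hypothetical local.conf fragment enabling one extra multilib:
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
DEFAULTTUNE:virtclass-multilib-lib32 = "x86"

Each variant must name a valid tune that is distinct from DEFAULTTUNE and from every other variant's tune, which is exactly what the loop above enforces.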
264def check_conf_exists(fn, data):
265 bbpath = []
266 fn = data.expand(fn)
267 vbbpath = data.getVar("BBPATH", False)
268 if vbbpath:
269 bbpath += vbbpath.split(":")
270 for p in bbpath:
271 currname = os.path.join(data.expand(p), fn)
272 if os.access(currname, os.R_OK):
273 return True
274 return False
275
276def check_create_long_filename(filepath, pathname):
277 import string, random
278 testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
279 try:
280 if not os.path.exists(filepath):
281 bb.utils.mkdirhier(filepath)
282 f = open(testfile, "w")
283 f.close()
284 os.remove(testfile)
285 except IOError as e:
286 import errno
287 err, strerror = e.args
288 if err == errno.ENAMETOOLONG:
289 return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
290 else:
291 return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
292 except OSError as e:
293 err, strerror = e.args
294 return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
295 return ""
296
297def check_path_length(filepath, pathname, limit):
298 if len(filepath) > limit:
299 return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
300 return ""
301
302def get_filesystem_id(path):
303 import subprocess
304 try:
305 return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
306 except subprocess.CalledProcessError:
307 bb.warn("Can't get filesystem id of: %s" % path)
308 return None
309
310 # Check that the path isn't located on NFS.
311def check_not_nfs(path, name):
312 # The NFS filesystem id is 6969
313 if get_filesystem_id(path) == "6969":
314 return "The %s (%s) can't be located on NFS.\n" % (name, path)
315 return ""
316
317# Check that the path is on a case-sensitive file system
318def check_case_sensitive(path, name):
319 import tempfile
320 with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
321 if os.path.exists(tmp_file.name.lower()):
322 return "The %s (%s) can't be on a case-insensitive file system.\n" % (name, path)
323 return ""
324
325# Check that path isn't a broken symlink
326def check_symlink(lnk, data):
327 if os.path.islink(lnk) and not os.path.exists(lnk):
328 raise_sanity_error("%s is a broken symlink." % lnk, data)
329
330def check_connectivity(d):
331 # URIs to check can be set in the CONNECTIVITY_CHECK_URIS variable
332 # using the same syntax as for SRC_URI. If the variable is not set
333 # the check is skipped
334 test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS') or "").split()
335 retval = ""
336
337 bbn = d.getVar('BB_NO_NETWORK')
338 if bbn not in (None, '0', '1'):
339 return 'BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn
340
341 # Only check connectivity if network enabled and the
342 # CONNECTIVITY_CHECK_URIS are set
343 network_enabled = not (bbn == '1')
344 check_enabled = len(test_uris)
345 if check_enabled and network_enabled:
346 # Take a copy of the data store and unset MIRRORS and PREMIRRORS
347 data = bb.data.createCopy(d)
348 data.delVar('PREMIRRORS')
349 data.delVar('MIRRORS')
350 try:
351 fetcher = bb.fetch2.Fetch(test_uris, data)
352 fetcher.checkstatus()
353 except Exception as err:
354 # Allow the message to be configured so that users can be
355 # pointed to a support mechanism.
356 msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
357 if len(msg) == 0:
358 msg = "%s.\n" % err
359 msg += " Please ensure your host's network is configured correctly.\n"
360 msg += " If your ISP or network is blocking the above URL,\n"
361 msg += " try with another domain name, for example by setting:\n"
362 msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\"\n"
363 msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
364 msg += " access if all required sources are on local disk.\n"
365 retval = msg
366
367 return retval
368
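A minimal sketch of enabling this check from local.conf (the URL and message are illustrative):

CONNECTIVITY_CHECK_URIS = "https://www.example.com/"
CONNECTIVITY_CHECK_MSG = "Fetch failed; check your proxy and firewall settings."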
369def check_supported_distro(sanity_data):
370 from fnmatch import fnmatch
371
372 tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS')
373 if not tested_distros:
374 return
375
376 try:
377 distro = oe.lsb.distro_identifier()
378 except Exception:
379 distro = None
380
381 if not distro:
382 bb.warn('Host distribution could not be determined; you may experience unexpected failures. It is recommended that you use a tested distribution.')
383 return
383
384 for supported in [x.strip() for x in tested_distros.split('\\n')]:
385 if fnmatch(distro, supported):
386 return
387
388 bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
389
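SANITY_TESTED_DISTROS is a '\n'-separated list of fnmatch patterns compared against oe.lsb.distro_identifier(); a hedged sketch of how a distro configuration might populate it (the entries are illustrative, not the project's actual list):

SANITY_TESTED_DISTROS ?= " \
    ubuntu-20.04 \n \
    ubuntu-22.04 \n \
    fedora-36 \n \
    debian-11 \n \
    "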
390# Checks we should only make if MACHINE is set correctly
391def check_sanity_validmachine(sanity_data):
392 messages = ""
393
394 # Check TUNE_ARCH is set
395 if sanity_data.getVar('TUNE_ARCH') == 'INVALID':
396 messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
397
398 # Check TARGET_OS is set
399 if sanity_data.getVar('TARGET_OS') == 'INVALID':
400 messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
401
402 # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
403 pkgarchs = sanity_data.getVar('PACKAGE_ARCHS')
404 tunepkg = sanity_data.getVar('TUNE_PKGARCH')
405 defaulttune = sanity_data.getVar('DEFAULTTUNE')
406 tunefound = False
407 seen = {}
408 dups = []
409
410 for pa in pkgarchs.split():
411 if seen.get(pa, 0) == 1:
412 dups.append(pa)
413 else:
414 seen[pa] = 1
415 if pa == tunepkg:
416 tunefound = True
417
418 if len(dups):
419 messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
420
421 if not tunefound:
422 messages = messages + "Error, the PACKAGE_ARCHS variable (%s) for DEFAULTTUNE (%s) does not contain TUNE_PKGARCH (%s)." % (pkgarchs, defaulttune, tunepkg)
423
424 return messages
425
426# Versions of patch before 2.7 can't handle all the features in git-style diffs. Some
427# patches may incorrectly apply, and others won't apply at all.
428def check_patch_version(sanity_data):
429 import re, subprocess
430
431 try:
432 result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
433 version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
434 if bb.utils.vercmp_string_op(version, "2.7", "<"):
435 return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
436 else:
437 return None
438 except subprocess.CalledProcessError as e:
439 return "Unable to execute patch --version, exit code %d:\n%s\n" % (e.returncode, e.output)
440
441# Glibc needs make 4.0 or later, so we may as well require that across the board
442def check_make_version(sanity_data):
443 import subprocess
444
445 try:
446 result = subprocess.check_output(['make', '--version'], stderr=subprocess.STDOUT).decode('utf-8')
447 except subprocess.CalledProcessError as e:
448 return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
449 version = result.split()[2]
450 if bb.utils.vercmp_string_op(version, "4.0", "<"):
451 return "Please install a make version of 4.0 or later.\n"
452
453 if bb.utils.vercmp_string_op(version, "4.2.1", "=="):
454 distro = oe.lsb.distro_identifier()
455 if "ubuntu" in distro or "debian" in distro or "linuxmint" in distro:
456 return None
457 return "make version 4.2.1 is known to have issues on CentOS/openSUSE and other non-Ubuntu systems. Please use a buildtools-make-tarball or a newer version of make.\n"
458 return None
459
460
461# Check if we're running on WSL (Windows Subsystem for Linux).
462# WSLv1 is known not to work, but WSLv2 should work properly as
463# long as the VHDX file is optimized often; let the user know
464# upfront.
465# More information on installing WSLv2 at:
466# https://docs.microsoft.com/en-us/windows/wsl/wsl2-install
467def check_wsl(d):
468 with open("/proc/version", "r") as f:
469 verdata = f.readlines()
470 for l in verdata:
471 if "Microsoft" in l:
472 return "OpenEmbedded doesn't work under WSLv1; please upgrade to WSLv2 if you want to run builds on Windows"
473 elif "microsoft" in l:
474 bb.warn("You are running bitbake under WSLv2; this works properly, but you should optimize your VHDX file regularly to avoid running out of storage space")
475 return None
476
477# Require at least gcc version 7.5.
478#
479# This can be fixed on CentOS-7 with devtoolset-6+
480# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
481#
482# A less invasive fix is with scripts/install-buildtools (or with user
483# built buildtools-extended-tarball)
484#
485def check_gcc_version(sanity_data):
486 import subprocess
487
488 build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
489 if build_cc.strip() == "gcc":
490 if bb.utils.vercmp_string_op(version, "7.5", "<"):
491 return "Your version of gcc is older than 7.5 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
492 return None
493
494# Tar version 1.24 and onwards handles overwriting symlinks correctly,
495# but earlier versions do not; this needs to work properly for sstate.
496# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
497def check_tar_version(sanity_data):
498 import subprocess
499 try:
500 result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
501 except subprocess.CalledProcessError as e:
502 return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
503 version = result.split()[3]
504 if bb.utils.vercmp_string_op(version, "1.28", "<"):
505 return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
506 return None
507
508# We use git parameters and functionality only found in 1.7.8 or later
509# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
510# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
511def check_git_version(sanity_data):
512 import subprocess
513 try:
514 result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
515 except subprocess.CalledProcessError as e:
516 return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
517 version = result.split()[2]
518 if bb.utils.vercmp_string_op(version, "1.8.3.1", "<"):
519 return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
520 return None
521
522# Check the required perl modules which may not be installed by default
523def check_perl_modules(sanity_data):
524 import subprocess
525 ret = ""
526 modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
527 errresult = ''
528 for m in modules:
529 try:
530 subprocess.check_output(["perl", "-e", "use %s" % m])
531 except subprocess.CalledProcessError as e:
532 errresult += bytes.decode(e.output)
533 ret += "%s " % m
534 if ret:
535 return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
536 return None
537
538def sanity_check_conffiles(d):
539 funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS').split()
540 for entry in funcs:
541 conffile, current_version, required_version, func = entry.split(":")
542 if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
543 d.getVar(current_version) != d.getVar(required_version):
544 try:
545 bb.build.exec_func(func, d)
546 except NotImplementedError as e:
547 bb.fatal(str(e))
548 d.setVar("BB_INVALIDCONF", True)
549
550def drop_v14_cross_builds(d):
551 import glob
552 indexes = glob.glob(d.expand("${SSTATE_MANIFESTS}/index-${BUILD_ARCH}_*"))
553 for i in indexes:
554 with open(i, "r") as f:
555 lines = f.readlines()
556 for l in reversed(lines):
557 try:
558 (stamp, manifest, workdir) = l.split()
559 except ValueError:
560 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
561 for m in glob.glob(manifest + ".*"):
562 if m.endswith(".postrm"):
563 continue
564 sstate_clean_manifest(m, d)
565 bb.utils.remove(stamp + "*")
566 bb.utils.remove(workdir, recurse = True)
567
568def sanity_handle_abichanges(status, d):
569 #
570 # Check the 'ABI' of TMPDIR
571 #
572 import subprocess
573
574 current_abi = d.getVar('OELAYOUT_ABI')
575 abifile = d.getVar('SANITY_ABIFILE')
576 if os.path.exists(abifile):
577 with open(abifile, "r") as f:
578 abi = f.read().strip()
579 if not abi.isdigit():
580 with open(abifile, "w") as f:
581 f.write(current_abi)
582 elif int(abi) <= 11 and current_abi == "12":
583 status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
584 elif int(abi) <= 13 and current_abi == "14":
585 status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
586 elif int(abi) == 14 and current_abi == "15":
587 drop_v14_cross_builds(d)
588 with open(abifile, "w") as f:
589 f.write(current_abi)
590 elif (abi != current_abi):
591 # Code to convert from one ABI to another could go here if possible.
592 status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
593 else:
594 with open(abifile, "w") as f:
595 f.write(current_abi)
596
597def check_sanity_sstate_dir_change(sstate_dir, data):
598 # Sanity checks to be done when the value of SSTATE_DIR changes
599
600 # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
601 testmsg = ""
602 if sstate_dir != "":
603 testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
604 # If we don't have permission to write to SSTATE_DIR, suggest using it as an SSTATE_MIRRORS entry instead
605 try:
606 err = testmsg.split(': ')[1].strip()
607 if err == "Permission denied.":
608 testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
609 except IndexError:
610 pass
611 return testmsg
612
613def check_sanity_version_change(status, d):
614 # Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
615 # In other words, these tests run once in a given build directory and then
616 # never again until the sanity version or host distribution id/version changes.
617
618 # Check the python install is complete. Examples that are often removed in
619 # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
620 # requires distutils.sysconfig.
621 try:
622 import xml.parsers.expat
623 import distutils.sysconfig
624 except ImportError as e:
625 status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
626
627 status.addresult(check_gcc_version(d))
628 status.addresult(check_make_version(d))
629 status.addresult(check_patch_version(d))
630 status.addresult(check_tar_version(d))
631 status.addresult(check_git_version(d))
632 status.addresult(check_perl_modules(d))
633 status.addresult(check_wsl(d))
634
635 missing = ""
636
637 if not check_app_exists("${MAKE}", d):
638 missing = missing + "GNU make,"
639
640 if not check_app_exists('${BUILD_CC}', d):
641 missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC")
642
643 if not check_app_exists('${BUILD_CXX}', d):
644 missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX")
645
646 required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES')
647
648 for util in required_utilities.split():
649 if not check_app_exists(util, d):
650 missing = missing + "%s," % util
651
652 if missing:
653 missing = missing.rstrip(',')
654 status.addresult("Please install the following missing utilities: %s\n" % missing)
655
656 assume_provided = d.getVar('ASSUME_PROVIDED').split()
657 # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
658 if "diffstat-native" not in assume_provided:
659 status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
660
661 # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
662 import stat
663 tmpdir = d.getVar('TMPDIR')
664 status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
665 tmpdirmode = os.stat(tmpdir).st_mode
666 if (tmpdirmode & stat.S_ISGID):
667 status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
668 if (tmpdirmode & stat.S_ISUID):
669 status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
670
671 # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
672 pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
673 workdir = d.getVar('WORKDIR', expand=True)
674 for i in pseudoignorepaths:
675 if i and workdir.startswith(i):
676 status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + " please locate the build outside this path.\n")
677
678 # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
679 pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
680 pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
681 pseudocontroldir = d.expand(pseudo_control_dir).split(",")
682 for i in pseudoignorepaths:
683 for j in pseudocontroldir:
684 if i and j:
685 if j.startswith(i):
686 status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo-controlled directories.\n")
687
688 # Some third-party software apparently relies on chmod etc. being suid root (!!)
689 import stat
690 suid_check_bins = "chown chmod mknod".split()
691 for bin_cmd in suid_check_bins:
692 bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
693 if bin_path:
694 bin_stat = os.stat(bin_path)
695 if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
696 status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
697
698 # Check that we can fetch from various network transports
699 netcheck = check_connectivity(d)
700 status.addresult(netcheck)
701 if netcheck:
702 status.network_error = True
703
704 nolibs = d.getVar('NO32LIBS')
705 if not nolibs:
706 lib32path = '/lib'
707 if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
708 lib32path = '/lib32'
709
710 if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
711 status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
712
713 bbpaths = d.getVar('BBPATH').split(":")
714 if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
715 status.addresult("BBPATH references the current directory, either through " \
716 "an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
717 "layer configuration is adding empty elements to BBPATH.\n\t "\
718 "Please check your layer.conf files and other BBPATH " \
719 "settings to remove the current working directory " \
720 "references.\n" \
721 "Parsed BBPATH is " + str(bbpaths))
722
723 oes_bb_conf = d.getVar('OES_BITBAKE_CONF')
724 if not oes_bb_conf:
725 status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
726
727 # The length of the TMPDIR path must not exceed 410 characters
728 status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
729
730 # Check that TMPDIR isn't located on nfs
731 status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
732
733 # Check for case-insensitive file systems (such as Linux in Docker on
734 # macOS with default HFS+ file system)
735 status.addresult(check_case_sensitive(tmpdir, "TMPDIR"))
736
737def sanity_check_locale(d):
738 """
739 Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
740 """
741 import locale
742 try:
743 locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
744 except locale.Error:
745 raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
746
747def check_sanity_everybuild(status, d):
748 import os, stat
749 # Sanity tests which test the user's environment and so need to run at each build (or which
750 # are so cheap it makes sense to always run them).
751
752 if 0 == os.getuid():
753 raise_sanity_error("Do not use Bitbake as root.", d)
754
755 # Check the Python version, we now have a minimum of Python 3.6
756 import sys
757 if sys.hexversion < 0x030600F0:
758 status.addresult('The system requires at least Python 3.6 to run. Please update your Python interpreter.\n')
759
760 # Check the bitbake version meets minimum requirements
761 minversion = d.getVar('BB_MIN_VERSION')
762 if bb.utils.vercmp_string_op(bb.__version__, minversion, "<"):
763 status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
764
765 sanity_check_locale(d)
766
767 paths = d.getVar('PATH').split(":")
768 if "." in paths or "./" in paths or "" in paths:
769 status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
770
771 # Check if bitbake is present in the PATH environment variable
772 bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
773 if not bb_check:
774 bb.warn("bitbake binary is not found in PATH, did you source the script?")
775
776 # Check whether the 'inherit' directive (meant for classes and recipes)
777 # appears in a conf file; in conf files the uppercase INHERIT must be used
778 inherit = d.getVar('inherit')
779 if inherit:
780 status.addresult("Please don't use the inherit directive in your local.conf. It is only for classes and recipes to inherit from bbclasses; in configuration files, INHERIT should be used.\n")
781
782 # Check that the DISTRO is valid, if set
783 # taking into account that a distro config may itself rename DISTRO
784 distro = d.getVar('DISTRO')
785 if distro and distro != "nodistro":
786 if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
787 status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
788
789 # Check that these variables don't use tilde-expansion as we don't do that
790 for v in ("TMPDIR", "DL_DIR", "SSTATE_DIR"):
791 if d.getVar(v).startswith("~"):
792 status.addresult("%s uses ~ but Bitbake will not expand this, use an absolute path or variables." % v)
793
794 # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
795 # set, since so much relies on it being set.
796 dldir = d.getVar('DL_DIR')
797 if not dldir:
798 status.addresult("DL_DIR is not set. Your environment is misconfigured; check that DL_DIR is set, and if the directory exists, that it is writable.\n")
799 if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
800 status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
801 check_symlink(dldir, d)
802
803 # Check that the MACHINE is valid, if it is set
804 machinevalid = True
805 if d.getVar('MACHINE'):
806 if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
807 status.addresult('MACHINE=%s is invalid. Please set a valid MACHINE in your local.conf, environment or other configuration file.\n' % (d.getVar('MACHINE')))
808 machinevalid = False
809 else:
810 status.addresult(check_sanity_validmachine(d))
811 else:
812 status.addresult('Please set a MACHINE in your local.conf or environment\n')
813 machinevalid = False
814 if machinevalid:
815 status.addresult(check_toolchain(d))
816
817 # Check that the SDKMACHINE is valid, if it is set
818 if d.getVar('SDKMACHINE'):
819 if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
820 status.addresult('Specified SDKMACHINE value is not valid\n')
821 elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
822 status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
823
824 # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
825 sdkvendor = d.getVar("SDK_VENDOR")
826 if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
827 status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash; found '%s'\n" % sdkvendor)
828
829 check_supported_distro(d)
830
831 omask = os.umask(0o022)
832 if omask & 0o755:
833 status.addresult("Please use a umask which allows a+rx and u+rwx\n")
834 os.umask(omask)
835
836 if d.getVar('TARGET_ARCH') == "arm":
837 # This path is no longer user-readable in modern (very recent) Linux
838 try:
839 if os.path.exists("/proc/sys/vm/mmap_min_addr"):
840 f = open("/proc/sys/vm/mmap_min_addr", "r")
841 try:
842 if (int(f.read().strip()) > 65536):
843 status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
844 finally:
845 f.close()
846 except Exception:
847 pass
848
849 for checkdir in ['COREBASE', 'TMPDIR']:
850 val = d.getVar(checkdir)
851 if val.find('..') != -1:
852 status.addresult("Error, you have '..' in your %s directory path. Please ensure the variable contains an absolute path as this can break some recipe builds in obtuse ways." % checkdir)
853 if val.find('+') != -1:
854 status.addresult("Error, you have an invalid character (+) in your %s directory path. Please move the installation to a directory which doesn't include any + characters." % checkdir)
855 if val.find('@') != -1:
856 status.addresult("Error, you have an invalid character (@) in your %s directory path. Please move the installation to a directory which doesn't include any @ characters." % checkdir)
857 if val.find(' ') != -1:
858 status.addresult("Error, you have a space in your %s directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this." % checkdir)
859 if val.find('%') != -1:
860 status.addresult("Error, you have an invalid character (%) in your %s directory path which causes problems with python string formatting. Please move the installation to a directory which doesn't include any % characters." % checkdir)
861
862 # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
863 import re
864 mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
865 protocols = ['http', 'ftp', 'file', 'https', \
866 'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
867 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az', 'ftps']
868 for mirror_var in mirror_vars:
869 mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
870
871 # Split into pairs
872 if len(mirrors) % 2 != 0:
873 bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, str(mirrors)))
874 continue
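# zip() below consumes one shared iterator twice, so consecutive items
# pair up as (pattern, mirror) tuples.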
875 mirrors = list(zip(*[iter(mirrors)]*2))
876
877 for mirror_entry in mirrors:
878 pattern, mirror = mirror_entry
879
880 decoded = bb.fetch2.decodeurl(pattern)
881 try:
882 pattern_scheme = re.compile(decoded[0])
883 except re.error as exc:
884 bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
885 continue
886
887 if not any(pattern_scheme.match(protocol) for protocol in protocols):
888 bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
889 continue
890
891 if not any(mirror.startswith(protocol + '://') for protocol in protocols):
892 bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
893 continue
894
895 if mirror.startswith('file://'):
896 import urllib
897 check_symlink(urllib.parse.urlparse(mirror).path, d)
898 # SSTATE_MIRRORS entries end with a /PATH string
899 if mirror.endswith('/PATH'):
900 # remove /PATH$ from SSTATE_MIRROR to get a working
901 # base directory path
902 mirror_base = urllib.parse.urlparse(mirror[:-1*len('/PATH')]).path
903 check_symlink(mirror_base, d)
904
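The /PATH handling above corresponds to mirror entries of the following shape (the local directory is illustrative):

SSTATE_MIRRORS ?= "file://.* file:///srv/sstate-cache/PATH"

The first element is a regex matched against requested sstate paths; the trailing /PATH is substituted with the object's sharded path at fetch time, which is why the code strips it to find the base directory to test for broken symlinks.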
905 # Check sstate mirrors aren't being used with a local hash server and no remote
906 hashserv = d.getVar("BB_HASHSERVE")
907 if d.getVar("SSTATE_MIRRORS") and hashserv and hashserv.startswith("unix://") and not d.getVar("BB_HASHSERVE_UPSTREAM"):
908 bb.warn("You are using a local hash equivalence server but have configured an sstate mirror. This will likely mean no sstate will match from the mirror. You may wish to disable the hash equivalence use (BB_HASHSERVE), or use a hash equivalence server alongside the sstate mirror.")
909
910 # Check that TMPDIR hasn't changed location since the last time we were run
911 tmpdir = d.getVar('TMPDIR')
912 checkfile = os.path.join(tmpdir, "saved_tmpdir")
913 if os.path.exists(checkfile):
914 with open(checkfile, "r") as f:
915 saved_tmpdir = f.read().strip()
916 if (saved_tmpdir != tmpdir):
917 status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
918 else:
919 bb.utils.mkdirhier(tmpdir)
920 # Remove setuid, setgid and sticky bits from TMPDIR
921 try:
922 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
923 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
924 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
925 except OSError as exc:
926 bb.warn("Unable to chmod TMPDIR: %s" % exc)
927 with open(checkfile, "w") as f:
928 f.write(tmpdir)
929
930 # If /bin/sh is a symlink, check that it points to dash or bash
931 if os.path.islink('/bin/sh'):
932 real_sh = os.path.realpath('/bin/sh')
933 # Due to update-alternatives, the shell name may take various
934 # forms, such as /bin/dash, /bin/bash, /bin/bash.bash ...
935 if '/dash' not in real_sh and '/bash' not in real_sh:
936 status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
937
938def check_sanity(sanity_data):
939 class SanityStatus(object):
940 def __init__(self):
941 self.messages = ""
942 self.network_error = False
943
944 def addresult(self, message):
945 if message:
946 self.messages = self.messages + message
947
948 status = SanityStatus()
949
950 tmpdir = sanity_data.getVar('TMPDIR')
951 sstate_dir = sanity_data.getVar('SSTATE_DIR')
952
953 check_symlink(sstate_dir, sanity_data)
954
955 # Check saved sanity info
956 last_sanity_version = 0
957 last_tmpdir = ""
958 last_sstate_dir = ""
959 last_nativelsbstr = ""
960 sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
961 if os.path.exists(sanityverfile):
962 with open(sanityverfile, 'r') as f:
963 for line in f:
964 if line.startswith('SANITY_VERSION'):
965 last_sanity_version = int(line.split()[1])
966 if line.startswith('TMPDIR'):
967 last_tmpdir = line.split()[1]
968 if line.startswith('SSTATE_DIR'):
969 last_sstate_dir = line.split()[1]
970 if line.startswith('NATIVELSBSTRING'):
971 last_nativelsbstr = line.split()[1]
972
973 check_sanity_everybuild(status, sanity_data)
974
975 sanity_version = int(sanity_data.getVar('SANITY_VERSION') or 1)
976 network_error = False
977 # NATIVELSBSTRING var may have been overridden with "universal", so
978 # get actual host distribution id and version
979 nativelsbstr = lsb_distro_identifier(sanity_data)
980 if last_sanity_version < sanity_version or last_nativelsbstr != nativelsbstr:
981 check_sanity_version_change(status, sanity_data)
982 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
983 else:
984 if last_sstate_dir != sstate_dir:
985 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
986
987 if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
988 with open(sanityverfile, 'w') as f:
989 f.write("SANITY_VERSION %s\n" % sanity_version)
990 f.write("TMPDIR %s\n" % tmpdir)
991 f.write("SSTATE_DIR %s\n" % sstate_dir)
992 f.write("NATIVELSBSTRING %s\n" % nativelsbstr)
993
994 sanity_handle_abichanges(status, sanity_data)
995
996 if status.messages != "":
997 raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
998
999# Create a copy of the datastore and finalise it to ensure appends and
1000# overrides are set - the datastore has yet to be finalised at ConfigParsed
1001def copy_data(e):
1002 sanity_data = bb.data.createCopy(e.data)
1003 sanity_data.finalize()
1004 return sanity_data
1005
1006addhandler config_reparse_eventhandler
1007config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
1008python config_reparse_eventhandler() {
1009 sanity_check_conffiles(e.data)
1010}
1011
1012addhandler check_sanity_eventhandler
1013check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
1014python check_sanity_eventhandler() {
1015 if bb.event.getName(e) == "SanityCheck":
1016 sanity_data = copy_data(e)
1017 check_sanity(sanity_data)
1018 if e.generateevents:
1019 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1020 bb.event.fire(bb.event.SanityCheckPassed(), e.data)
1021 elif bb.event.getName(e) == "NetworkTest":
1022 sanity_data = copy_data(e)
1023 if e.generateevents:
1024 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1025 bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
1026
1027 return
1028}
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
deleted file mode 100644
index 5f0d4a910b..0000000000
--- a/meta/classes/scons.bbclass
+++ /dev/null
@@ -1,34 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native
8
9DEPENDS += "python3-scons-native"
10
11EXTRA_OESCONS ?= ""
12
13do_configure() {
14 if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
15 if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
16 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
17 fi
18
19 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
20 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
21 fi
22}
23
24scons_do_compile() {
25 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
26 die "scons build execution failed."
27}
28
29scons_do_install() {
30 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
31 die "scons install execution failed."
32}
33
34EXPORT_FUNCTIONS do_compile do_install
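For context, a recipe consumes this class roughly as follows (a hypothetical sketch; the name, SRC_URI and scons argument are illustrative):

SUMMARY = "Example tool built with SCons"
LICENSE = "MIT"
SRC_URI = "git://git.example.com/tool.git;protocol=https;branch=main"
S = "${WORKDIR}/git"

inherit scons

# Extra arguments appended to every scons invocation:
EXTRA_OESCONS = "BUILD=release"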
diff --git a/meta/classes/setuptools3-base.bbclass b/meta/classes/setuptools3-base.bbclass
deleted file mode 100644
index 21b688ced0..0000000000
--- a/meta/classes/setuptools3-base.bbclass
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS:append:class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
8DEPENDS:append:class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
9RDEPENDS:${PN}:append:class-target = " ${PYTHON_PN}-core"
10
11export STAGING_INCDIR
12export STAGING_LIBDIR
13
14# LDSHARED is the ld *command* used to create a shared library
15export LDSHARED = "${CCLD} -shared"
16# LDCXXSHARED is the ld *command* used to create a shared library of C++
17# objects
18export LDCXXSHARED = "${CXX} -shared"
19# CCSHARED are the C *flags* used to create objects to go into a shared
20# library (module)
21export CCSHARED = "-fPIC -DPIC"
22# LINKFORSHARED are the flags passed to the $(CC) command that links
23# the python executable
24export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
25
26FILES:${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
27
28FILES:${PN}-staticdev += "\
29 ${PYTHON_SITEPACKAGES_DIR}/*.a \
30"
31FILES:${PN}-dev += "\
32 ${datadir}/pkgconfig \
33 ${libdir}/pkgconfig \
34 ${PYTHON_SITEPACKAGES_DIR}/*.la \
35"
36inherit python3native python3targetconfig
37
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
deleted file mode 100644
index 4c6e79ee9a..0000000000
--- a/meta/classes/setuptools3.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit setuptools3-base python_pep517
8
9DEPENDS += "python3-setuptools-native python3-wheel-native"
10
11SETUPTOOLS_BUILD_ARGS ?= ""
12
13SETUPTOOLS_SETUP_PATH ?= "${S}"
14
15setuptools3_do_configure() {
16 :
17}
18
19setuptools3_do_compile() {
20 cd ${SETUPTOOLS_SETUP_PATH}
21 NO_FETCH_BUILD=1 \
22 STAGING_INCDIR=${STAGING_INCDIR} \
23 STAGING_LIBDIR=${STAGING_LIBDIR} \
24 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
25 bdist_wheel --verbose --dist-dir ${PEP517_WHEEL_PATH} ${SETUPTOOLS_BUILD_ARGS} || \
26 bbfatal_log "'${PYTHON_PN} setup.py bdist_wheel ${SETUPTOOLS_BUILD_ARGS}' execution failed."
27}
28setuptools3_do_compile[vardepsexclude] = "MACHINE"
29do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
30
31# This could be removed in the future but some recipes in meta-oe still use it
32setuptools3_do_install() {
33 python_pep517_do_install
34}
35
36EXPORT_FUNCTIONS do_configure do_compile do_install
37
38export LDSHARED="${CCLD} -shared"
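A typical consumer of this class looks like the following sketch (the module name and URL are illustrative):

SUMMARY = "Example pure-Python module packaged with setuptools"
LICENSE = "MIT"
SRC_URI = "https://files.example.com/example-module-1.0.tar.gz"

inherit setuptools3

# Extra flags for 'setup.py bdist_wheel', if the module needs any:
SETUPTOOLS_BUILD_ARGS = ""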
diff --git a/meta/classes/setuptools3_legacy.bbclass b/meta/classes/setuptools3_legacy.bbclass
deleted file mode 100644
index 21748f922a..0000000000
--- a/meta/classes/setuptools3_legacy.bbclass
+++ /dev/null
@@ -1,84 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is for packages which use the deprecated setuptools behaviour,
8# specifically custom install tasks which don't work correctly with bdist_wheel.
9# This behaviour is deprecated in setuptools[1] and won't work in the future, so
10# all users of this should consider their options: pure Python modules can use a
11# modern Python tool such as build[2], or packages which are doing more (such as
12# installing init scripts) should use a fully-featured build system such as Meson.
13#
14# [1] https://setuptools.pypa.io/en/latest/history.html#id142
15# [2] https://pypi.org/project/build/
16
17inherit setuptools3-base
18
19B = "${WORKDIR}/build"
20
21SETUPTOOLS_BUILD_ARGS ?= ""
22SETUPTOOLS_INSTALL_ARGS ?= "--root=${D} \
23 --prefix=${prefix} \
24 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
25 --install-data=${datadir}"
26
27SETUPTOOLS_PYTHON = "python3"
28SETUPTOOLS_PYTHON:class-native = "nativepython3"
29
30SETUPTOOLS_SETUP_PATH ?= "${S}"
31
32setuptools3_legacy_do_configure() {
33 :
34}
35
36setuptools3_legacy_do_compile() {
37 cd ${SETUPTOOLS_SETUP_PATH}
38 NO_FETCH_BUILD=1 \
39 STAGING_INCDIR=${STAGING_INCDIR} \
40 STAGING_LIBDIR=${STAGING_LIBDIR} \
41 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
42 build --build-base=${B} ${SETUPTOOLS_BUILD_ARGS} || \
43 bbfatal_log "'${PYTHON_PN} setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed."
44}
45setuptools3_legacy_do_compile[vardepsexclude] = "MACHINE"
46
47setuptools3_legacy_do_install() {
48 cd ${SETUPTOOLS_SETUP_PATH}
49 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
50 STAGING_INCDIR=${STAGING_INCDIR} \
51 STAGING_LIBDIR=${STAGING_LIBDIR} \
52 PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
53 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
54 build --build-base=${B} install --skip-build ${SETUPTOOLS_INSTALL_ARGS} || \
55 bbfatal_log "'${PYTHON_PN} setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed."
56
57 # support filenames with *spaces*
58 find ${D} -name "*.py" -exec grep -q ${D} {} \; \
59 -exec sed -i -e s:${D}::g {} \;
60
61 for i in ${D}${bindir}/* ${D}${sbindir}/*; do
62 if [ -f "$i" ]; then
63 sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${SETUPTOOLS_PYTHON}:g $i
64 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
65 fi
66 done
67
68 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
69
70 #
71 # FIXME: Bandaid against wrong datadir computation
72 #
73 if [ -e ${D}${datadir}/share ]; then
74 mv -f ${D}${datadir}/share/* ${D}${datadir}/
75 rmdir ${D}${datadir}/share
76 fi
77}
78setuptools3_legacy_do_install[vardepsexclude] = "MACHINE"
79
80EXPORT_FUNCTIONS do_configure do_compile do_install
81
82export LDSHARED="${CCLD} -shared"
83DEPENDS += "python3-setuptools-native"
84
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
deleted file mode 100644
index d31c9b2571..0000000000
--- a/meta/classes/siteinfo.bbclass
+++ /dev/null
@@ -1,232 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class exists to provide information about the targets that
8# may be needed by other classes and/or recipes. If you add a new
9# target this will probably need to be updated.
10
11#
12# Provides site information for the target "<arch>-<os>".
13#
14# siteinfo_data_for_machine() below returns the raw list of site elements
15# (endianness, bit size, arch/os/libc families, the target itself and
16# "common"), and the anonymous python exposes the two most useful pieces
17# as variables:
18# * SITEINFO_ENDIANNESS: "be" for big endian targets, "le" for little endian
19# * SITEINFO_BITS: the bit size of the target, either "32" or "64"
20#
21# It is a fatal error if endianness or bit size cannot be determined
22# for the architecture.
23#
24def siteinfo_data_for_machine(arch, os, d):
25 archinfo = {
26 "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
27 "aarch64": "endian-little bit-64 arm-common arm-64",
28 "aarch64_be": "endian-big bit-64 arm-common arm-64",
29 "arc": "endian-little bit-32 arc-common",
30 "arceb": "endian-big bit-32 arc-common",
31 "arm": "endian-little bit-32 arm-common arm-32",
32 "armeb": "endian-big bit-32 arm-common arm-32",
33 "avr32": "endian-big bit-32 avr32-common",
34 "bfin": "endian-little bit-32 bfin-common",
35 "epiphany": "endian-little bit-32",
36 "i386": "endian-little bit-32 ix86-common",
37 "i486": "endian-little bit-32 ix86-common",
38 "i586": "endian-little bit-32 ix86-common",
39 "i686": "endian-little bit-32 ix86-common",
40 "ia64": "endian-little bit-64",
41 "lm32": "endian-big bit-32",
42 "m68k": "endian-big bit-32",
43 "microblaze": "endian-big bit-32 microblaze-common",
44 "microblazeel": "endian-little bit-32 microblaze-common",
45 "mips": "endian-big bit-32 mips-common",
46 "mips64": "endian-big bit-64 mips-common",
47 "mips64el": "endian-little bit-64 mips-common",
48 "mipsisa64r6": "endian-big bit-64 mips-common",
49 "mipsisa64r6el": "endian-little bit-64 mips-common",
50 "mipsel": "endian-little bit-32 mips-common",
51 "mipsisa32r6": "endian-big bit-32 mips-common",
52 "mipsisa32r6el": "endian-little bit-32 mips-common",
53 "powerpc": "endian-big bit-32 powerpc-common",
54 "powerpcle": "endian-little bit-32 powerpc-common",
55 "nios2": "endian-little bit-32 nios2-common",
56 "powerpc64": "endian-big bit-64 powerpc-common",
57 "powerpc64le": "endian-little bit-64 powerpc-common",
58 "ppc": "endian-big bit-32 powerpc-common",
59 "ppc64": "endian-big bit-64 powerpc-common",
60 "ppc64le" : "endian-little bit-64 powerpc-common",
61 "riscv32": "endian-little bit-32 riscv-common",
62 "riscv64": "endian-little bit-64 riscv-common",
63 "sh3": "endian-little bit-32 sh-common",
64 "sh3eb": "endian-big bit-32 sh-common",
65 "sh4": "endian-little bit-32 sh-common",
66 "sh4eb": "endian-big bit-32 sh-common",
67 "sparc": "endian-big bit-32",
68 "viac3": "endian-little bit-32 ix86-common",
69 "x86_64": "endian-little", # bitinfo specified in targetinfo
70 }
71 osinfo = {
72 "darwin": "common-darwin",
73 "darwin9": "common-darwin",
74 "linux": "common-linux common-glibc",
75 "linux-gnu": "common-linux common-glibc",
76 "linux-gnu_ilp32": "common-linux common-glibc",
77 "linux-gnux32": "common-linux common-glibc",
78 "linux-gnun32": "common-linux common-glibc",
79 "linux-gnueabi": "common-linux common-glibc",
80 "linux-gnuspe": "common-linux common-glibc",
81 "linux-musl": "common-linux common-musl",
82 "linux-muslx32": "common-linux common-musl",
83 "linux-musleabi": "common-linux common-musl",
84 "linux-muslspe": "common-linux common-musl",
85 "uclinux-uclibc": "common-uclibc",
86 "cygwin": "common-cygwin",
87 "mingw32": "common-mingw",
88 }
89 targetinfo = {
90 "aarch64-linux-gnu": "aarch64-linux",
91 "aarch64_be-linux-gnu": "aarch64_be-linux",
92 "aarch64-linux-gnu_ilp32": "bit-32 aarch64-linux arm-32",
93 "aarch64_be-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
94 "aarch64-linux-musl": "aarch64-linux",
95 "aarch64_be-linux-musl": "aarch64_be-linux",
96 "arm-linux-gnueabi": "arm-linux",
97 "arm-linux-musleabi": "arm-linux",
98 "armeb-linux-gnueabi": "armeb-linux",
99 "armeb-linux-musleabi": "armeb-linux",
100 "microblazeel-linux" : "microblaze-linux",
101 "microblazeel-linux-musl" : "microblaze-linux",
102 "mips-linux-musl": "mips-linux",
103 "mipsel-linux-musl": "mipsel-linux",
104 "mips64-linux-musl": "mips64-linux",
105 "mips64el-linux-musl": "mips64el-linux",
106 "mips64-linux-gnun32": "mips-linux bit-32",
107 "mips64el-linux-gnun32": "mipsel-linux bit-32",
108 "mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
109 "mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
110 "powerpc-linux": "powerpc32-linux powerpc32-linux-glibc",
111 "powerpc-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
112 "powerpcle-linux": "powerpc32-linux powerpc32-linux-glibc",
113 "powerpcle-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
114 "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux powerpc32-linux-glibc",
115 "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
116 "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
117 "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
118 "powerpc64-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
119 "powerpc64-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
120 "powerpc64le-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
121 "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
122 "riscv32-linux": "riscv32-linux",
123 "riscv32-linux-musl": "riscv32-linux",
124 "riscv64-linux": "riscv64-linux",
125 "riscv64-linux-musl": "riscv64-linux",
126 "x86_64-cygwin": "bit-64",
127 "x86_64-darwin": "bit-64",
128 "x86_64-darwin9": "bit-64",
129 "x86_64-linux": "bit-64",
130 "x86_64-linux-musl": "x86_64-linux bit-64",
131 "x86_64-linux-muslx32": "bit-32 ix86-common x32-linux",
132 "x86_64-elf": "bit-64",
133 "x86_64-linux-gnu": "bit-64 x86_64-linux",
134 "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
135 "x86_64-mingw32": "bit-64",
136 }
137
138 # Add in any extra user supplied data which may come from a BSP layer, removing the
139 # need to always change this class directly
140 extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS") or "").split()
141 for m in extra_siteinfo:
142 call = m + "(archinfo, osinfo, targetinfo, d)"
143 locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
144 archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
145
146 target = "%s-%s" % (arch, os)
147
148 sitedata = []
149 if arch in archinfo:
150 sitedata.extend(archinfo[arch].split())
151 if os in osinfo:
152 sitedata.extend(osinfo[os].split())
153 if target in targetinfo:
154 sitedata.extend(targetinfo[target].split())
155 sitedata.append(target)
156 sitedata.append("common")
157
158 bb.debug(1, "SITE files %s" % sitedata)
159 return sitedata
160
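The SITEINFO_EXTRA_DATAFUNCS hook above calls each named function with the three tables and rebinds the returned tuple, so a BSP layer can extend the data without patching this class. A hedged sketch (the function and architecture names are hypothetical):

def my_bsp_siteinfo_data(archinfo, osinfo, targetinfo, d):
    # Teach siteinfo about a custom little-endian, 32-bit architecture.
    archinfo["myarch"] = "endian-little bit-32"
    return archinfo, osinfo, targetinfo

SITEINFO_EXTRA_DATAFUNCS += "my_bsp_siteinfo_data"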
161def siteinfo_data(d):
162 return siteinfo_data_for_machine(d.getVar("HOST_ARCH"), d.getVar("HOST_OS"), d)
163
164python () {
165 sitedata = set(siteinfo_data(d))
166 if "endian-little" in sitedata:
167 d.setVar("SITEINFO_ENDIANNESS", "le")
168 elif "endian-big" in sitedata:
169 d.setVar("SITEINFO_ENDIANNESS", "be")
170 else:
171 bb.error("Unable to determine endianness for architecture '%s'" %
172 d.getVar("HOST_ARCH"))
173 bb.fatal("Please add your architecture to siteinfo.bbclass")
174
175 if "bit-32" in sitedata:
176 d.setVar("SITEINFO_BITS", "32")
177 elif "bit-64" in sitedata:
178 d.setVar("SITEINFO_BITS", "64")
179 else:
180 bb.error("Unable to determine bit size for architecture '%s'" %
181 d.getVar("HOST_ARCH"))
182 bb.fatal("Please add your architecture to siteinfo.bbclass")
183}
184
185# Layers with siteconfig need to add a replacement path to this variable so the
186# sstate isn't path specific
187SITEINFO_PATHVARS = "COREBASE"
188
189def siteinfo_get_files(d, sysrootcache=False):
190 sitedata = siteinfo_data(d)
191 sitefiles = []
192 searched = []
193 for path in d.getVar("BBPATH").split(":"):
194 for element in sitedata:
195 filename = os.path.join(path, "site", element)
196 if os.path.exists(filename):
197 searched.append(filename + ":True")
198 sitefiles.append(filename)
199 else:
200 searched.append(filename + ":False")
201
202 # Have to parameterise out hardcoded paths such as COREBASE for the main site files
203 for var in d.getVar("SITEINFO_PATHVARS").split():
204 searched2 = []
205 replace = os.path.normpath(d.getVar(var))
206 for s in searched:
207 searched2.append(s.replace(replace, "${" + var + "}"))
208 searched = searched2
209
210 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
211 # We need sstate sigs for native/cross not to vary upon arch so we can't depend on the site files.
212 # In future we may want to depend upon all site files?
213 # This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example
214 searched = []
215
216 if not sysrootcache:
217 return sitefiles, searched
218
219 # Now check for siteconfig cache files in sysroots
220 path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
221 if path_siteconfig and os.path.isdir(path_siteconfig):
222 for i in os.listdir(path_siteconfig):
223 if not i.endswith("_config"):
224 continue
225 filename = os.path.join(path_siteconfig, i)
226 sitefiles.append(filename)
227 return sitefiles, searched
228
229#
230# Make some information available via variables
231#
232SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
deleted file mode 100644
index cd77c58dbf..0000000000
--- a/meta/classes/sstate.bbclass
+++ /dev/null
@@ -1,1364 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SSTATE_VERSION = "10"
8
9SSTATE_ZSTD_CLEVEL ??= "8"
10
11SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
12SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
14def generate_sstatefn(spec, hash, taskname, siginfo, d):
15 if taskname is None:
16 return ""
17 extension = ".tar.zst"
18 # 8 chars reserved for siginfo
19 limit = 254 - 8
20 if siginfo:
21 limit = 254
22 extension = ".tar.zst.siginfo"
23 if not hash:
24 hash = "INVALID"
25 fn = spec + hash + "_" + taskname + extension
26 # If the filename is too long, attempt to reduce it
27 if len(fn) > limit:
28 components = spec.split(":")
29 # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
30 # 7 is for the separators
31 avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
32 components[2] = components[2][:avail]
33 components[3] = components[3][:avail]
34 components[4] = components[4][:avail]
35 spec = ":".join(components)
36 fn = spec + hash + "_" + taskname + extension
37 if len(fn) > limit:
38            bb.fatal("Unable to reduce sstate name to less than 255 characters")
39 return hash[:2] + "/" + hash[2:4] + "/" + fn
40
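# For example (hypothetical hash): with unihash "d3adb33f..." and taskname
# "populate_sysroot", generate_sstatefn() returns a path of the form
#   d3/ad/sstate:<PN>:<arch-os>:<PV>:<PR>:<pkgarch>:<version>:d3adb33f..._populate_sysroot.tar.zst
# i.e. objects are sharded into two directory levels keyed on the hash prefix.
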
41SSTATE_PKGARCH = "${PACKAGE_ARCH}"
42SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
43SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
44SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
45SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
46SSTATE_EXTRAPATH = ""
47SSTATE_EXTRAPATHWILDCARD = ""
48SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
49
50# explicitly make PV depend on the evaluated value of the PV variable
51PV[vardepvalue] = "${PV}"
52
53# We don't want the sstate to depend on things like the distro string
54# of the system, we let the sstate paths take care of this.
55SSTATE_EXTRAPATH[vardepvalue] = ""
56SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
58# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
59SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
60# Avoid docbook/sgml catalog warnings for now
61SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
62# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
63SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
64SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
65# target-sdk-provides-dummy files overlap because allarch is disabled when multilib is used
66SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
67# Archive the sources for many architectures in one deploy folder
68SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
69# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
70SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
71SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
72SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
73SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
74
75SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
76SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
77SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
78SSTATE_HASHEQUIV_FILEMAP ?= " \
79 populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
80 populate_sysroot:*/postinst-useradd-*:${COREBASE} \
81 populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
82 populate_sysroot:*/crossscripts/*:${TMPDIR} \
83 populate_sysroot:*/crossscripts/*:${COREBASE} \
84 "
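# Roughly speaking, each SSTATE_HASHEQUIV_FILEMAP entry is a colon-separated
# triple of sstate task, file glob, and a path (or a "regex-" prefixed
# expression) that is filtered out of matching files' contents before the
# task's output hash is computed, so build-specific paths do not defeat
# hash equivalence.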
85
86BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
87
88SSTATE_ARCHS = " \
89 ${BUILD_ARCH} \
90 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
91 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
92 ${SDK_ARCH}_${SDK_OS} \
93 ${SDK_ARCH}_${PACKAGE_ARCH} \
94 allarch \
95 ${PACKAGE_ARCH} \
96 ${PACKAGE_EXTRA_ARCHS} \
97 ${MACHINE_ARCH}"
98SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
99
100SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
101
102SSTATECREATEFUNCS += "sstate_hardcode_path"
103SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
104SSTATEPOSTCREATEFUNCS = ""
105SSTATEPREINSTFUNCS = ""
106SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
107SSTATEPOSTINSTFUNCS = ""
108EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
109
110# Check whether sstate exists for tasks that support sstate and are in the
111# locked signatures file.
112SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
113
114# Check whether the task's computed hash matches the task's hash in the
115# locked signatures file.
116SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
117
118# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
119# not sign)
120SSTATE_SIG_KEY ?= ""
121SSTATE_SIG_PASSPHRASE ?= ""
122# Whether to verify the GnuPG signatures when extracting sstate archives
123SSTATE_VERIFY_SIG ?= "0"
124# List of signatures to consider valid.
125SSTATE_VALID_SIGS ??= ""
126SSTATE_VALID_SIGS[vardepvalue] = ""
127
128SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
129SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
130 the output hash for a task, which in turn is used to determine equivalency. \
131 "
132
133SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
134SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
135 hash equivalency server, such as PN, PV, taskname, etc. This information \
136 is very useful for developers looking at task data, but may leak sensitive \
137 data if the equivalence server is public. \
138 "
139
140python () {
141 if bb.data.inherits_class('native', d):
142 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
143 elif bb.data.inherits_class('crosssdk', d):
144 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
145 elif bb.data.inherits_class('cross', d):
146 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
147 elif bb.data.inherits_class('nativesdk', d):
148 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
149 elif bb.data.inherits_class('cross-canadian', d):
150 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
151 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
152 d.setVar('SSTATE_PKGARCH', "allarch")
153 else:
154 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
155
156 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
157 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
158 d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
159 d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
160
161 unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
162 d.setVar('SSTATETASKS', " ".join(unique_tasks))
163 for task in unique_tasks:
164 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
165 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
166 d.setVarFlag(task, 'network', '1')
167 d.setVarFlag(task + "_setscene", 'network', '1')
168}
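
# As a concrete illustration (host-dependent values): on an x86-64 build
# machine a -native recipe ends up with SSTATE_PKGARCH = "x86_64", while a
# crosssdk recipe would get e.g. "x86_64_x86_64_linux", matching entries in
# SSTATE_ARCHS above.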
169
170def sstate_init(task, d):
171 ss = {}
172 ss['task'] = task
173 ss['dirs'] = []
174 ss['plaindirs'] = []
175 ss['lockfiles'] = []
176 ss['lockfiles-shared'] = []
177 return ss
178
179def sstate_state_fromvars(d, task = None):
180 if task is None:
181 task = d.getVar('BB_CURRENTTASK')
182 if not task:
183 bb.fatal("sstate code running without task context?!")
184 task = task.replace("_setscene", "")
185
186 if task.startswith("do_"):
187 task = task[3:]
188 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
189 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
190 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
191 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
192 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
193 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
194 fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
195 if not task or len(inputs) != len(outputs):
196 bb.fatal("sstate variables not setup correctly?!")
197
198 if task == "populate_lic":
199 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
200 d.setVar("SSTATE_EXTRAPATH", "")
201 d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
202
203 ss = sstate_init(task, d)
204 for i in range(len(inputs)):
205 sstate_add(ss, inputs[i], outputs[i], d)
206 ss['lockfiles'] = lockfiles
207 ss['lockfiles-shared'] = lockfilesshared
208 ss['plaindirs'] = plaindirs
209 ss['interceptfuncs'] = interceptfuncs
210 ss['fixmedir'] = fixmedir
211 return ss
212
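# For orientation (a sketch, not part of this class): the varflags read below
# are set by classes that register an sstate-backed task. deploy.bbclass, for
# example, wires up do_deploy roughly as:
#
#   SSTATETASKS += "do_deploy"
#   do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
#   do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
#   python do_deploy_setscene () {
#       sstate_setscene(d)
#   }
#   addtask do_deploy_setscene
#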
213def sstate_add(ss, source, dest, d):
214 if not source.endswith("/"):
215 source = source + "/"
216 if not dest.endswith("/"):
217 dest = dest + "/"
218 source = os.path.normpath(source)
219 dest = os.path.normpath(dest)
220 srcbase = os.path.basename(source)
221 ss['dirs'].append([srcbase, source, dest])
222 return ss
223
224def sstate_install(ss, d):
225 import oe.path
226 import oe.sstatesig
227 import subprocess
228
229 sharedfiles = []
230 shareddirs = []
231 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
232
233 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
234
235 manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
236
237 if os.access(manifest, os.R_OK):
238 bb.fatal("Package already staged (%s)?!" % manifest)
239
240 d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
241
242 locks = []
243 for lock in ss['lockfiles-shared']:
244 locks.append(bb.utils.lockfile(lock, True))
245 for lock in ss['lockfiles']:
246 locks.append(bb.utils.lockfile(lock))
247
248 for state in ss['dirs']:
249 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
250 for walkroot, dirs, files in os.walk(state[1]):
251 for file in files:
252 srcpath = os.path.join(walkroot, file)
253 dstpath = srcpath.replace(state[1], state[2])
254 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
255 sharedfiles.append(dstpath)
256 for dir in dirs:
257 srcdir = os.path.join(walkroot, dir)
258 dstdir = srcdir.replace(state[1], state[2])
259 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
260 if os.path.islink(srcdir):
261 sharedfiles.append(dstdir)
262 continue
263 if not dstdir.endswith("/"):
264 dstdir = dstdir + "/"
265 shareddirs.append(dstdir)
266
267 # Check the file list for conflicts against files which already exist
268 overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
269 match = []
270 for f in sharedfiles:
271 if os.path.exists(f) and not os.path.islink(f):
272 f = os.path.normpath(f)
273 realmatch = True
274 for w in overlap_allowed:
275 w = os.path.normpath(w)
276 if f.startswith(w):
277 realmatch = False
278 break
279 if realmatch:
280 match.append(f)
281 sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
282 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
283 if search_output:
284 match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
285 else:
286 match.append(" (not matched to any task)")
287 if match:
288 bb.error("The recipe %s is trying to install files into a shared " \
289 "area when those files already exist. Those files and their manifest " \
290 "location are:\n %s\nPlease verify which recipe should provide the " \
291 "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
292 "break things - if not now, possibly in the future (we've seen builds fail " \
293 "several months later). If the system knew how to recover from this " \
294 "automatically it would, however there are several different scenarios " \
295 "which can result in this and we don't know which one this is. It may be " \
296 "you have switched providers of something like virtual/kernel (e.g. from " \
297 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
298 "clean task for both recipes and it will resolve this error. It may be " \
299 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
300 "those recipes should again resolve this error, however switching " \
301 "DISTRO_FEATURES on an existing build directory is not supported - you " \
302 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
303 "It could be the overlapping files detected are harmless in which case " \
304 "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
305 "also be your build is including two different conflicting versions of " \
306                "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
307 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
308 "sharing the error and filelist above." % \
309 (d.getVar('PN'), "\n ".join(match)))
310        bb.fatal("If the above message is too much, the simpler version is that you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
311
312 if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
313 sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
314 sharedfiles.append(ss['fixmedir'] + "/fixmepath")
315
316 # Write out the manifest
317 f = open(manifest, "w")
318 for file in sharedfiles:
319 f.write(file + "\n")
320
321 # We want to ensure that directories appear at the end of the manifest
322 # so that when we test to see if they should be deleted any contents
323 # added by the task will have been removed first.
324 dirs = sorted(shareddirs, key=len)
325 # Must remove children first, which will have a longer path than the parent
326 for di in reversed(dirs):
327 f.write(di + "\n")
328 f.close()
329
330 # Append to the list of manifests for this PACKAGE_ARCH
331
332 i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
333 l = bb.utils.lockfile(i + ".lock")
334 filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
335 manifests = []
336 if os.path.exists(i):
337 with open(i, "r") as f:
338 manifests = f.readlines()
339    # We append new entries; we don't remove older entries which may have the same
340 # manifest name but different versions from stamp/workdir. See below.
341 if filedata not in manifests:
342 with open(i, "a+") as f:
343 f.write(filedata)
344 bb.utils.unlockfile(l)
345
346 # Run the actual file install
347 for state in ss['dirs']:
348 if os.path.exists(state[1]):
349 oe.path.copyhardlinktree(state[1], state[2])
350
351 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
352 # All hooks should run in the SSTATE_INSTDIR
353 bb.build.exec_func(postinst, d, (sstateinst,))
354
355 for lock in locks:
356 bb.utils.unlockfile(lock)
357
358sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
359sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
360
361def sstate_installpkg(ss, d):
362 from oe.gpg_sign import get_signer
363
364 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
365 d.setVar("SSTATE_CURRTASK", ss['task'])
366 sstatefetch = d.getVar('SSTATE_PKGNAME')
367 sstatepkg = d.getVar('SSTATE_PKG')
368
369 if not os.path.exists(sstatepkg):
370 pstaging_fetch(sstatefetch, d)
371
372 if not os.path.isfile(sstatepkg):
373 bb.note("Sstate package %s does not exist" % sstatepkg)
374 return False
375
376 sstate_clean(ss, d)
377
378 d.setVar('SSTATE_INSTDIR', sstateinst)
379
380 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
381 if not os.path.isfile(sstatepkg + '.sig'):
382 bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
383 return False
384 signer = get_signer(d, 'local')
385 if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
386 bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
387 return False
388
389    # Empty the sstateinst directory to ensure it's clean
390 if os.path.exists(sstateinst):
391 oe.path.remove(sstateinst)
392 bb.utils.mkdirhier(sstateinst)
393
394 sstateinst = d.getVar("SSTATE_INSTDIR")
395 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
396
397 for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
398 # All hooks should run in the SSTATE_INSTDIR
399 bb.build.exec_func(f, d, (sstateinst,))
400
401 return sstate_installpkgdir(ss, d)
402
403def sstate_installpkgdir(ss, d):
404 import oe.path
405 import subprocess
406
407 sstateinst = d.getVar("SSTATE_INSTDIR")
408 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
409
410 for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
411 # All hooks should run in the SSTATE_INSTDIR
412 bb.build.exec_func(f, d, (sstateinst,))
413
414 def prepdir(dir):
415 # remove dir if it exists, ensure any parent directories do exist
416 if os.path.exists(dir):
417 oe.path.remove(dir)
418 bb.utils.mkdirhier(dir)
419 oe.path.remove(dir)
420
421 for state in ss['dirs']:
422 prepdir(state[1])
423 bb.utils.rename(sstateinst + state[0], state[1])
424 sstate_install(ss, d)
425
426 for plain in ss['plaindirs']:
427 workdir = d.getVar('WORKDIR')
428 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
429 src = sstateinst + "/" + plain.replace(workdir, '')
430 if sharedworkdir in plain:
431 src = sstateinst + "/" + plain.replace(sharedworkdir, '')
432 dest = plain
433 bb.utils.mkdirhier(src)
434 prepdir(dest)
435 bb.utils.rename(src, dest)
436
437 return True
438
439python sstate_hardcode_path_unpack () {
440 # Fixup hardcoded paths
441 #
442 # Note: The logic below must match the reverse logic in
443 # sstate_hardcode_path(d)
444 import subprocess
445
446 sstateinst = d.getVar('SSTATE_INSTDIR')
447 sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
448 fixmefn = sstateinst + "fixmepath"
449 if os.path.isfile(fixmefn):
450 staging_target = d.getVar('RECIPE_SYSROOT')
451 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
452
453 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
454 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
455 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
456 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
457 else:
458 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
459
460 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
461 for fixmevar in extra_staging_fixmes.split():
462 fixme_path = d.getVar(fixmevar)
463 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
464
465 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
466 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
467
468 # Defer do_populate_sysroot relocation command
469 if sstatefixmedir:
470 bb.utils.mkdirhier(sstatefixmedir)
471 with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
472 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
473 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
474 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
475 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
476 f.write(sstate_hardcode_cmd)
477 bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
478 return
479
480 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
481 subprocess.check_call(sstate_hardcode_cmd, shell=True)
482
483 # Need to remove this or we'd copy it into the target directory and may
484 # conflict with another writer
485 os.remove(fixmefn)
486}
487
488def sstate_clean_cachefile(ss, d):
489 import oe.path
490
491 if d.getVarFlag('do_%s' % ss['task'], 'task'):
492 d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
493 sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
494 bb.note("Removing %s" % sstatepkgfile)
495 oe.path.remove(sstatepkgfile)
496
497def sstate_clean_cachefiles(d):
498 for task in (d.getVar('SSTATETASKS') or "").split():
499 ld = d.createCopy()
500 ss = sstate_state_fromvars(ld, task)
501 sstate_clean_cachefile(ss, ld)
502
503def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
504 import oe.path
505
506 mfile = open(manifest)
507 entries = mfile.readlines()
508 mfile.close()
509
510 for entry in entries:
511 entry = entry.strip()
512 if prefix and not entry.startswith("/"):
513 entry = prefix + "/" + entry
514 bb.debug(2, "Removing manifest: %s" % entry)
515 # We can race against another package populating directories as we're removing them
516 # so we ignore errors here.
517 try:
518 if entry.endswith("/"):
519 if os.path.islink(entry[:-1]):
520 os.remove(entry[:-1])
521 elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
522 # Removing directories whilst builds are in progress exposes a race. Only
523 # do it in contexts where it is safe to do so.
524 os.rmdir(entry[:-1])
525 else:
526 os.remove(entry)
527 except OSError:
528 pass
529
530 postrm = manifest + ".postrm"
531 if os.path.exists(manifest + ".postrm"):
532 import subprocess
533 os.chmod(postrm, 0o755)
534 subprocess.check_call(postrm, shell=True)
535 oe.path.remove(postrm)
536
537 oe.path.remove(manifest)
538
539def sstate_clean(ss, d):
540 import oe.path
541 import glob
542
543 d2 = d.createCopy()
544 stamp_clean = d.getVar("STAMPCLEAN")
545 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
546 if extrainf:
547 d2.setVar("SSTATE_MANMACH", extrainf)
548 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
549 else:
550 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
551
552 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
553
554 if os.path.exists(manifest):
555 locks = []
556 for lock in ss['lockfiles-shared']:
557 locks.append(bb.utils.lockfile(lock))
558 for lock in ss['lockfiles']:
559 locks.append(bb.utils.lockfile(lock))
560
561 sstate_clean_manifest(manifest, d, canrace=True)
562
563 for lock in locks:
564 bb.utils.unlockfile(lock)
565
566 # Remove the current and previous stamps, but keep the sigdata.
567 #
568 # The glob() matches do_task* which may match multiple tasks, for
569 # example: do_package and do_package_write_ipk, so we need to
570 # exactly match *.do_task.* and *.do_task_setscene.*
571 rm_stamp = '.do_%s.' % ss['task']
572 rm_setscene = '.do_%s_setscene.' % ss['task']
573 # For BB_SIGNATURE_HANDLER = "noop"
574 rm_nohash = ".do_%s" % ss['task']
575 for stfile in glob.glob(wildcard_stfile):
576 # Keep the sigdata
577 if ".sigdata." in stfile or ".sigbasedata." in stfile:
578 continue
579 # Preserve taint files in the stamps directory
580 if stfile.endswith('.taint'):
581 continue
582 if rm_stamp in stfile or rm_setscene in stfile or \
583 stfile.endswith(rm_nohash):
584 oe.path.remove(stfile)
585
586sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
587
588CLEANFUNCS += "sstate_cleanall"
589
590python sstate_cleanall() {
591 bb.note("Removing shared state for package %s" % d.getVar('PN'))
592
593 manifest_dir = d.getVar('SSTATE_MANIFESTS')
594 if not os.path.exists(manifest_dir):
595 return
596
597 tasks = d.getVar('SSTATETASKS').split()
598 for name in tasks:
599 ld = d.createCopy()
600 shared_state = sstate_state_fromvars(ld, name)
601 sstate_clean(shared_state, ld)
602}
603
604python sstate_hardcode_path () {
605 import subprocess, platform
606
607 # Need to remove hardcoded paths and fix these when we install the
608 # staging packages.
609 #
610 # Note: the logic in this function needs to match the reverse logic
611 # in sstate_installpkg(ss, d)
612
613 staging_target = d.getVar('RECIPE_SYSROOT')
614 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
615 sstate_builddir = d.getVar('SSTATE_BUILDDIR')
616
617 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
618 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
619 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
620 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
621 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
622 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
623 else:
624 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
625 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
626
627 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
628 for fixmevar in extra_staging_fixmes.split():
629 fixme_path = d.getVar(fixmevar)
630 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
631 sstate_grep_cmd += " -e '%s'" % (fixme_path)
632
633 fixmefn = sstate_builddir + "fixmepath"
634
635 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
636 sstate_filelist_cmd = "tee %s" % (fixmefn)
637
638 # fixmepath file needs relative paths, drop sstate_builddir prefix
639 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
640
641 xargs_no_empty_run_cmd = '--no-run-if-empty'
642 if platform.system() == 'Darwin':
643 xargs_no_empty_run_cmd = ''
644
645 # Limit the fixpaths and sed operations based on the initial grep search
646 # This has the side effect of making sure the vfs cache is hot
647 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
648
649 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
650 subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
651
652 # If the fixmefn is empty, remove it..
653 if os.stat(fixmefn).st_size == 0:
654 os.remove(fixmefn)
655 else:
656 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
657 subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
658}
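
# Schematically (paths abbreviated), the pipeline assembled above expands to:
#   find ${SSTATE_BUILDDIR} ... | xargs grep -l -e '<sysroot path>' \
#       | tee fixmepath | xargs --no-run-if-empty sed -i -e 's:<sysroot path>:FIXMESTAGINGDIRTARGET:g'
# so only files which actually contain a hardcoded path get rewritten.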
659
660def sstate_package(ss, d):
661 import oe.path
662 import time
663
664 tmpdir = d.getVar('TMPDIR')
665
666 fixtime = False
667 if ss['task'] == "package":
668 fixtime = True
669
670 def fixtimestamp(root, path):
671 f = os.path.join(root, path)
672 if os.lstat(f).st_mtime > sde:
673 os.utime(f, (sde, sde), follow_symlinks=False)
674
675 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
676 sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
677 d.setVar("SSTATE_CURRTASK", ss['task'])
678 bb.utils.remove(sstatebuild, recurse=True)
679 bb.utils.mkdirhier(sstatebuild)
680 for state in ss['dirs']:
681 if not os.path.exists(state[1]):
682 continue
683 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
684        # Find absolute symlinks and error out on them. We could attempt to relocate them,
685        # but it's not clear what the symlink is relative to in this context. We could add
686        # that markup to sstate tasks, but there aren't many of these, so it's better to avoid them entirely.
687 for walkroot, dirs, files in os.walk(state[1]):
688 for file in files + dirs:
689 if fixtime:
690 fixtimestamp(walkroot, file)
691 srcpath = os.path.join(walkroot, file)
692 if not os.path.islink(srcpath):
693 continue
694 link = os.readlink(srcpath)
695 if not os.path.isabs(link):
696 continue
697 if not link.startswith(tmpdir):
698 continue
699 bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
700 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
701 bb.utils.rename(state[1], sstatebuild + state[0])
702
703 workdir = d.getVar('WORKDIR')
704 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
705 for plain in ss['plaindirs']:
706 pdir = plain.replace(workdir, sstatebuild)
707 if sharedworkdir in plain:
708 pdir = plain.replace(sharedworkdir, sstatebuild)
709 bb.utils.mkdirhier(plain)
710 bb.utils.mkdirhier(pdir)
711 bb.utils.rename(plain, pdir)
712 if fixtime:
713 fixtimestamp(pdir, "")
714 for walkroot, dirs, files in os.walk(pdir):
715 for file in files + dirs:
716 fixtimestamp(walkroot, file)
717
718 d.setVar('SSTATE_BUILDDIR', sstatebuild)
719 d.setVar('SSTATE_INSTDIR', sstatebuild)
720
721 if d.getVar('SSTATE_SKIP_CREATION') == '1':
722 return
723
724 sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
725 if d.getVar('SSTATE_SIG_KEY'):
726 sstate_create_package.append('sstate_sign_package')
727
728 for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
729 sstate_create_package + \
730 (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
731 # All hooks should run in SSTATE_BUILDDIR.
732 bb.build.exec_func(f, d, (sstatebuild,))
733
734 # SSTATE_PKG may have been changed by sstate_report_unihash
735 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
736 if not os.path.exists(siginfo):
737 bb.siggen.dump_this_task(siginfo, d)
738 else:
739 try:
740 os.utime(siginfo, None)
741 except PermissionError:
742 pass
743 except OSError as e:
744 # Handle read-only file systems gracefully
745 import errno
746 if e.errno != errno.EROFS:
747 raise e
748
749 return
750
751sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
752
753def pstaging_fetch(sstatefetch, d):
754 import bb.fetch2
755
756 # Only try and fetch if the user has configured a mirror
757 mirrors = d.getVar('SSTATE_MIRRORS')
758 if not mirrors:
759 return
760
761 # Copy the data object and override DL_DIR and SRC_URI
762 localdata = bb.data.createCopy(d)
763
764 dldir = localdata.expand("${SSTATE_DIR}")
765 bb.utils.mkdirhier(dldir)
766
767 localdata.delVar('MIRRORS')
768 localdata.setVar('FILESPATH', dldir)
769 localdata.setVar('DL_DIR', dldir)
770 localdata.setVar('PREMIRRORS', mirrors)
771 localdata.setVar('SRCPV', d.getVar('SRCPV'))
772
773 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
774 # we'll want to allow network access for the current set of fetches.
775 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
776 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
777 localdata.delVar('BB_NO_NETWORK')
778
779 # Try a fetch from the sstate mirror, if it fails just return and
780 # we will build the package
781 uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
782 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
783 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
784 uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
785
786 for srcuri in uris:
787 localdata.setVar('SRC_URI', srcuri)
788 try:
789 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
790 fetcher.checkstatus()
791 fetcher.download()
792
793 except bb.fetch2.BBFetchException:
794 pass
795
796pstaging_fetch[vardepsexclude] += "SRCPV"
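
# For reference, SSTATE_MIRRORS uses the PREMIRRORS format; a typical
# local.conf entry (hypothetical server) would look like:
#   SSTATE_MIRRORS ?= "file://.* https://sstate.example.com/PATH;downloadfilename=PATH"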
797
798
799def sstate_setscene(d):
800 shared_state = sstate_state_fromvars(d)
801 accelerate = sstate_installpkg(shared_state, d)
802 if not accelerate:
803 msg = "No sstate archive obtainable, will run full task instead."
804 bb.warn(msg)
805 raise bb.BBHandledException(msg)
806
807python sstate_task_prefunc () {
808 shared_state = sstate_state_fromvars(d)
809 sstate_clean(shared_state, d)
810}
811sstate_task_prefunc[dirs] = "${WORKDIR}"
812
813python sstate_task_postfunc () {
814 shared_state = sstate_state_fromvars(d)
815
816 for intercept in shared_state['interceptfuncs']:
817 bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
818
819 omask = os.umask(0o002)
820 if omask != 0o002:
821 bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
822 sstate_package(shared_state, d)
823 os.umask(omask)
824
825 sstateinst = d.getVar("SSTATE_INSTDIR")
826 d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
827
828 sstate_installpkgdir(shared_state, d)
829
830 bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
831}
832sstate_task_postfunc[dirs] = "${WORKDIR}"
833
834
835#
836# Shell function to generate a sstate package from a directory
837# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
838#
839sstate_create_package () {
840 # Exit early if it already exists
841 if [ -e ${SSTATE_PKG} ]; then
842 touch ${SSTATE_PKG} 2>/dev/null || true
843 return
844 fi
845
846 mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
847 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
848
849 OPT="-cS"
850 ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
851 # Use pzstd if available
852 if [ -x "$(command -v pzstd)" ]; then
853 ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
854 fi
855
856 # Need to handle empty directories
857 if [ "$(ls -A)" ]; then
858 set +e
859 tar -I "$ZSTD" $OPT -f $TFILE *
860 ret=$?
861 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
862 exit 1
863 fi
864 set -e
865 else
866 tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
867 fi
868 chmod 0664 $TFILE
869 # Skip if it was already created by some other process
870 if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
871 # There is a symbolic link, but it links to nothing.
872 # Forcefully replace it with the new file.
873 ln -f $TFILE ${SSTATE_PKG} || true
874 elif [ ! -e ${SSTATE_PKG} ]; then
875 # Move into place using ln to attempt an atomic op.
876 # Abort if it already exists
877 ln $TFILE ${SSTATE_PKG} || true
878 else
879 touch ${SSTATE_PKG} 2>/dev/null || true
880 fi
881 rm $TFILE
882}
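
# In effect the function runs (schematically, assuming pzstd is absent and the
# default compression level):
#   tar -I "zstd -8 -T${ZSTD_THREADS}" -cS -f <tmpfile> *
# and then hardlinks the temporary file into place so that concurrent writers
# never observe a partially written archive.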
883
884python sstate_sign_package () {
885 from oe.gpg_sign import get_signer
886
887
888 signer = get_signer(d, 'local')
889 sstate_pkg = d.getVar('SSTATE_PKG')
890 if os.path.exists(sstate_pkg + '.sig'):
891 os.unlink(sstate_pkg + '.sig')
892 signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
893 d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
894}
895
896python sstate_report_unihash() {
897 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
898
899 if report_unihash:
900 ss = sstate_state_fromvars(d)
901 report_unihash(os.getcwd(), ss['task'], d)
902}
903
904#
905# Shell function to decompress and prepare a package for installation
906# Will be run from within SSTATE_INSTDIR.
907#
908sstate_unpack_package () {
909 ZSTD="zstd -T${ZSTD_THREADS}"
910 # Use pzstd if available
911 if [ -x "$(command -v pzstd)" ]; then
912 ZSTD="pzstd -p ${ZSTD_THREADS}"
913 fi
914
915 tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
916 # update .siginfo atime on local/NFS mirror if it is a symbolic link
917 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
918 # update each symbolic link instead of any referenced file
919 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
920 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
921 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
922}
923
924BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
925
926def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
927 found = set()
928 missed = set()
929
930 def gethash(task):
931 return sq_data['unihash'][task]
932
933 def getpathcomponents(task, d):
934 # Magic data from BB_HASHFILENAME
935 splithashfn = sq_data['hashfn'][task].split(" ")
936 spec = splithashfn[1]
937 if splithashfn[0] == "True":
938 extrapath = d.getVar("NATIVELSBSTRING") + "/"
939 else:
940 extrapath = ""
941
942 tname = bb.runqueue.taskname_from_tid(task)[3:]
943
944 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
945 spec = splithashfn[2]
946 extrapath = ""
947
948 return spec, extrapath, tname
949
950 def getsstatefile(tid, siginfo, d):
951 spec, extrapath, tname = getpathcomponents(tid, d)
952 return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
953
954 for tid in sq_data['hash']:
955
956 sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
957
958 if os.path.exists(sstatefile):
959 found.add(tid)
960 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
961 else:
962 missed.add(tid)
963 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
964
965 foundLocal = len(found)
966 mirrors = d.getVar("SSTATE_MIRRORS")
967 if mirrors:
968 # Copy the data object and override DL_DIR and SRC_URI
969 localdata = bb.data.createCopy(d)
970
971 dldir = localdata.expand("${SSTATE_DIR}")
972 localdata.delVar('MIRRORS')
973 localdata.setVar('FILESPATH', dldir)
974 localdata.setVar('DL_DIR', dldir)
975 localdata.setVar('PREMIRRORS', mirrors)
976
977 bb.debug(2, "SState using premirror of: %s" % mirrors)
978
979 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
980 # we'll want to allow network access for the current set of fetches.
981 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
982 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
983 localdata.delVar('BB_NO_NETWORK')
984
985 from bb.fetch2 import FetchConnectionCache
986 def checkstatus_init():
987 while not connection_cache_pool.full():
988 connection_cache_pool.put(FetchConnectionCache())
989
990 def checkstatus_end():
991 while not connection_cache_pool.empty():
992 connection_cache = connection_cache_pool.get()
993 connection_cache.close_connections()
994
995 def checkstatus(arg):
996 (tid, sstatefile) = arg
997
998 connection_cache = connection_cache_pool.get()
999 localdata2 = bb.data.createCopy(localdata)
1000 srcuri = "file://" + sstatefile
1001 localdata2.setVar('SRC_URI', srcuri)
1002 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
1003
1004 import traceback
1005
1006 try:
1007 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
1008 connection_cache=connection_cache)
1009 fetcher.checkstatus()
1010 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
1011 found.add(tid)
1012 missed.remove(tid)
1013 except bb.fetch2.FetchError as e:
1014 bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
1015 except Exception as e:
1016 bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
1017
1018 connection_cache_pool.put(connection_cache)
1019
1020 if progress:
1021 bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
1022
1023 tasklist = []
1024 for tid in missed:
1025 sstatefile = d.expand(getsstatefile(tid, siginfo, d))
1026 tasklist.append((tid, sstatefile))
1027
1028 if tasklist:
1029 nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
1030
1031 progress = len(tasklist) >= 100
1032 if progress:
1033 msg = "Checking sstate mirror object availability"
1034 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
1035
1036 # Have to setup the fetcher environment here rather than in each thread as it would race
1037 fetcherenv = bb.fetch2.get_fetcher_environment(d)
1038 with bb.utils.environment(**fetcherenv):
1039 bb.event.enable_threadlock()
1040 import concurrent.futures
1041 from queue import Queue
1042 connection_cache_pool = Queue(nproc)
1043 checkstatus_init()
1044 with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
1045 executor.map(checkstatus, tasklist.copy())
1046 checkstatus_end()
1047 bb.event.disable_threadlock()
1048
1049 if progress:
1050 bb.event.fire(bb.event.ProcessFinished(msg), d)
1051
1052 inheritlist = d.getVar("INHERIT")
1053 if "toaster" in inheritlist:
1054        evdata = {'missed': [], 'found': []}
1055 for tid in missed:
1056 sstatefile = d.expand(getsstatefile(tid, False, d))
1057 evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1058 for tid in found:
1059 sstatefile = d.expand(getsstatefile(tid, False, d))
1060 evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1061 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
1062
1063 if summary:
1064 # Print some summary statistics about the current task completion and how much sstate
1065 # reuse there was. Avoid divide by zero errors.
1066 total = len(sq_data['hash'])
1067 complete = 0
1068 if currentcount:
1069 complete = (len(found) + currentcount) / (total + currentcount) * 100
1070 match = 0
1071 if total:
1072 match = len(found) / total * 100
1073 bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
1074 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
1075
1076 if hasattr(bb.parse.siggen, "checkhashes"):
1077 bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1078
1079 return found
1080setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
1081
1082BB_SETSCENE_DEPVALID = "setscene_depvalid"
1083
1084def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1085    # taskdependees is a dict of tasks which depend on task, each entry being a 3-item list of [PN, TASKNAME, FILENAME]
1086 # task is included in taskdependees too
1087 # Return - False - We need this dependency
1088 # - True - We can skip this dependency
1089 import re
1090
1091 def logit(msg, log):
1092 if log is not None:
1093 log.append(msg)
1094 else:
1095 bb.debug(2, msg)
1096
1097 logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1098
1099 directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
1100
1101 def isNativeCross(x):
1102 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1103
1104 # We only need to trigger deploy_source_date_epoch through direct dependencies
1105 if taskdependees[task][1] in directtasks:
1106 return True
1107
1108 # We only need to trigger packagedata through direct dependencies
1109 # but need to preserve packagedata on packagedata links
1110 if taskdependees[task][1] == "do_packagedata":
1111 for dep in taskdependees:
1112 if taskdependees[dep][1] == "do_packagedata":
1113 return False
1114 return True
1115
1116 for dep in taskdependees:
1117 logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
1118 if task == dep:
1119 continue
1120 if dep in notneeded:
1121 continue
1122        # do_package_write_* and do_package don't need do_package
1123 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1124 continue
1125 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1126 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1127 return False
1128 # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
1129 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
1130 continue
1131 # Native/Cross packages don't exist and are noexec anyway
1132 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1133 continue
1134
1135 # This is due to the [depends] in useradd.bbclass complicating matters
1136 # The logic *is* reversed here due to the way hard setscene dependencies are injected
1137 if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
1138 continue
1139
1140 # Consider sysroot depending on sysroot tasks
1141 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
1142 # Allow excluding certain recursive dependencies. If a recipe needs it should add a
1143 # specific dependency itself, rather than relying on one of its dependees to pull
1144 # them in.
1145 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
1146 not_needed = False
1147 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1148 if excludedeps is None:
1149 # Cache the regular expressions for speed
1150 excludedeps = []
1151 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1152 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1153 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1154 for excl in excludedeps:
1155 if excl[0].match(taskdependees[dep][0]):
1156 if excl[1].match(taskdependees[task][0]):
1157 not_needed = True
1158 break
1159 if not_needed:
1160 continue
1161 # For meta-extsdk-toolchain we want all sysroot dependencies
1162 if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1163 return False
1164 # Native/Cross populate_sysroot need their dependencies
1165 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1166 return False
1167 # Target populate_sysroot depended on by cross tools need to be installed
1168 if isNativeCross(taskdependees[dep][0]):
1169 return False
1170 # Native/cross tools depended upon by target sysroot are not needed
1171 # Add an exception for shadow-native as required by useradd.bbclass
1172 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1173 continue
1174 # Target populate_sysroot need their dependencies
1175 return False
1176
1177 if taskdependees[dep][1] in directtasks:
1178 continue
1179
1180 # Safe fallthrough default
1181 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1182 return False
1183 return True
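
# For illustration: SSTATE_EXCLUDEDEPS_SYSROOT entries are pairs of regular
# expressions separated by "->"; the first is matched against the dependent
# recipe, the second against the populate_sysroot dependency that may be
# skipped. A hypothetical entry:
#   SSTATE_EXCLUDEDEPS_SYSROOT += ".*->autoconf-native"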
1184
1185addhandler sstate_eventhandler
1186sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1187python sstate_eventhandler() {
1188 d = e.data
1189 writtensstate = d.getVar('SSTATE_CURRTASK')
1190 if not writtensstate:
1191 taskname = d.getVar("BB_RUNTASK")[3:]
1192 spec = d.getVar('SSTATE_PKGSPEC')
1193 swspec = d.getVar('SSTATE_SWSPEC')
1194 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1195 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1196 d.setVar("SSTATE_EXTRAPATH", "")
1197 d.setVar("SSTATE_CURRTASK", taskname)
1198 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1199 if not os.path.exists(siginfo):
1200 bb.siggen.dump_this_task(siginfo, d)
1201 else:
1202 try:
1203 os.utime(siginfo, None)
1204 except PermissionError:
1205 pass
1206 except OSError as e:
1207 # Handle read-only file systems gracefully
1208 import errno
1209 if e.errno != errno.EROFS:
1210 raise e
1211
1212}
1213
1214SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1215
1216#
1217# Event handler which removes manifests and stamp files for recipes which are no
1218# longer 'reachable' in a build where they once were. 'Reachable' refers to
1219# whether a recipe is parsed so recipes in a layer which was removed would no
1220# longer be reachable. Switching between systemd and sysvinit where recipes
1221# became skipped would be another example.
1222#
1223# Also optionally removes the workdir of those tasks/recipes
1224#
1225addhandler sstate_eventhandler_reachablestamps
1226sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
1227python sstate_eventhandler_reachablestamps() {
1228 import glob
1229 d = e.data
1230 stamps = e.stamps.values()
1231 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1232 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1233 preservestamps = []
1234 if os.path.exists(preservestampfile):
1235 with open(preservestampfile, 'r') as f:
1236 preservestamps = f.readlines()
1237 seen = []
1238
1239 # The machine index contains all the stamps this machine has ever seen in this build directory.
1240 # We should only remove things which this machine once accessed but no longer does.
1241 machineindex = set()
1242 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1243 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1244 if os.path.exists(mi):
1245 with open(mi, "r") as f:
1246 machineindex = set(line.strip() for line in f.readlines())
1247
1248 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1249 toremove = []
1250 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1251 if not os.path.exists(i):
1252 continue
1253 manseen = set()
1254 ignore = []
1255 with open(i, "r") as f:
1256 lines = f.readlines()
1257 for l in reversed(lines):
1258 try:
1259 (stamp, manifest, workdir) = l.split()
1260 # The index may have multiple entries for the same manifest as the code above only appends
1261 # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
1262 # The last entry in the list is the valid one, any earlier entries with matching manifests
1263 # should be ignored.
1264 if manifest in manseen:
1265 ignore.append(l)
1266 continue
1267 manseen.add(manifest)
1268 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1269 toremove.append(l)
1270 if stamp not in seen:
1271 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1272 seen.append(stamp)
1273 except ValueError:
1274 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1275
1276 if toremove:
1277 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1278 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1279
1280 removed = 0
1281 for r in toremove:
1282 (stamp, manifest, workdir) = r.split()
1283 for m in glob.glob(manifest + ".*"):
1284 if m.endswith(".postrm"):
1285 continue
1286 sstate_clean_manifest(m, d)
1287 bb.utils.remove(stamp + "*")
1288 if removeworkdir:
1289 bb.utils.remove(workdir, recurse = True)
1290 lines.remove(r)
1291 removed = removed + 1
1292 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1293
1294 bb.event.fire(bb.event.ProcessFinished(msg), d)
1295
1296 with open(i, "w") as f:
1297 for l in lines:
1298 if l in ignore:
1299 continue
1300 f.write(l)
1301 machineindex |= set(stamps)
1302 with open(mi, "w") as f:
1303 for l in machineindex:
1304 f.write(l + "\n")
1305
1306 if preservestamps:
1307 os.remove(preservestampfile)
1308}
1309
1310
1311#
1312# Bitbake can generate an event showing which setscene tasks are 'stale',
1313# i.e. which ones will be rerun. These are ones where a stamp file is present but
1314# it is no longer valid (e.g. the taskhash doesn't match). With that list we can go through
1315# the manifests for matching tasks and "uninstall" those manifests now. We do
1316# this now rather than mid-build since the distribution of files between sstate
1317# objects may have changed, new tasks may run first and if those new tasks overlap
1318# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1319# removing these files is fast.
1320#
1321addhandler sstate_eventhandler_stalesstate
1322sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1323python sstate_eventhandler_stalesstate() {
1324 d = e.data
1325 tasks = e.tasks
1326
1327 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1328
1329 for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1330 toremove = []
1331 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1332 if not os.path.exists(i):
1333 continue
1334 with open(i, "r") as f:
1335 lines = f.readlines()
1336 for l in lines:
1337 try:
1338 (stamp, manifest, workdir) = l.split()
1339 for tid in tasks:
1340 for s in tasks[tid]:
1341 if s.startswith(stamp):
1342 taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1343 manname = manifest + "." + taskname
1344 if os.path.exists(manname):
1345 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1346 toremove.append((manname, tid, tasks[tid]))
1347 break
1348 except ValueError:
1349 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1350
1351 if toremove:
1352 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1353 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1354
1355 removed = 0
1356 for (manname, tid, stamps) in toremove:
1357 sstate_clean_manifest(manname, d)
1358 for stamp in stamps:
1359 bb.utils.remove(stamp)
1360 removed = removed + 1
1361 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1362
1363 bb.event.fire(bb.event.ProcessFinished(msg), d)
1364}
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
deleted file mode 100644
index 5a1f43de78..0000000000
--- a/meta/classes/staging.bbclass
+++ /dev/null
@@ -1,690 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# These directories will be staged in the sysroot
8SYSROOT_DIRS = " \
9 ${includedir} \
10 ${libdir} \
11 ${base_libdir} \
12 ${nonarch_base_libdir} \
13 ${datadir} \
14 /sysroot-only \
15"
16
17# These directories are also staged in the sysroot when they contain files that
18# are usable on the build system
19SYSROOT_DIRS_NATIVE = " \
20 ${bindir} \
21 ${sbindir} \
22 ${base_bindir} \
23 ${base_sbindir} \
24 ${libexecdir} \
25 ${sysconfdir} \
26 ${localstatedir} \
27"
28SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
29SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
30SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
31
32# These directories will not be staged in the sysroot
33SYSROOT_DIRS_IGNORE = " \
34 ${mandir} \
35 ${docdir} \
36 ${infodir} \
37 ${datadir}/X11/locale \
38 ${datadir}/applications \
39 ${datadir}/bash-completion \
40 ${datadir}/fonts \
41 ${datadir}/gtk-doc/html \
42 ${datadir}/installed-tests \
43 ${datadir}/locale \
44 ${datadir}/pixmaps \
45 ${datadir}/terminfo \
46 ${libdir}/${BPN}/ptest \
47"
48
49sysroot_stage_dir() {
50 src="$1"
51 dest="$2"
52    # if the src doesn't exist, don't do anything
53 if [ ! -d "$src" ]; then
54 return
55 fi
56
57 mkdir -p "$dest"
58 rdest=$(realpath --relative-to="$src" "$dest")
59 (
60 cd $src
61 find . -print0 | cpio --null -pdlu $rdest
62 )
63}
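
# Note on the cpio invocation above: -p (pass-through) copies the listed
# files, -d creates leading directories as needed, -l hardlinks instead of
# copying where possible, and -u overwrites unconditionally, giving a cheap
# hardlinked copy of the tree.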
64
65sysroot_stage_dirs() {
66 from="$1"
67 to="$2"
68
69 for dir in ${SYSROOT_DIRS}; do
70 sysroot_stage_dir "$from$dir" "$to$dir"
71 done
72
73 # Remove directories we do not care about
74 for dir in ${SYSROOT_DIRS_IGNORE}; do
75 rm -rf "$to$dir"
76 done
77}
78
79sysroot_stage_all() {
80 sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
81}
82
83python sysroot_strip () {
84 inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
85 if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
86 return
87
88 dstdir = d.getVar('SYSROOT_DESTDIR')
89 pn = d.getVar('PN')
90 libdir = d.getVar("libdir")
91 base_libdir = d.getVar("base_libdir")
92 qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
93 strip_cmd = d.getVar("STRIP")
94
95 oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
96 qa_already_stripped=qa_already_stripped)
97}
98
99do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
100
101addtask populate_sysroot after do_install
102
103SYSROOT_PREPROCESS_FUNCS ?= ""
104SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
105
106python do_populate_sysroot () {
107 # SYSROOT 'version' 2
108 bb.build.exec_func("sysroot_stage_all", d)
109 bb.build.exec_func("sysroot_strip", d)
110 for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
111 bb.build.exec_func(f, d)
112 pn = d.getVar("PN")
113 multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
114 provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
115 bb.utils.mkdirhier(provdir)
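    # Illustration (hypothetical recipe): with PROVIDES = "virtual/libc",
    # the loop below writes sysroot-providers/virtual_libc containing the
    # PN (e.g. "glibc"), recording which recipe provided that capability
    # in this sysroot.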
116 for p in d.getVar("PROVIDES").split():
117 if p in multiprov:
118 continue
119 p = p.replace("/", "_")
120 with open(provdir + p, "w") as f:
121 f.write(pn)
122}
123
124do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
125do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"
126
127POPULATESYSROOTDEPS = ""
128POPULATESYSROOTDEPS:class-target = "virtual/${MLPREFIX}${HOST_PREFIX}binutils:do_populate_sysroot"
129POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils-crosssdk:do_populate_sysroot"
130do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
131
132SSTATETASKS += "do_populate_sysroot"
133do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
134do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
135do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
136do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
137
138python do_populate_sysroot_setscene () {
139 sstate_setscene(d)
140}
141addtask do_populate_sysroot_setscene
142
143def staging_copyfile(c, target, dest, postinsts, seendirs):
144 import errno
145
146 destdir = os.path.dirname(dest)
147 if destdir not in seendirs:
148 bb.utils.mkdirhier(destdir)
149 seendirs.add(destdir)
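    # Note: postinst scripts are only collected here; the callers run them
    # once all files have been staged (see the ends of
    # staging_populate_sysroot_dir() and extend_recipe_sysroot()).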
150 if "/usr/bin/postinst-" in c:
151 postinsts.append(dest)
152 if os.path.islink(c):
153 linkto = os.readlink(c)
154 if os.path.lexists(dest):
155 if not os.path.islink(dest):
156 raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
157 if os.readlink(dest) == linkto:
158 return dest
159 raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
160 os.symlink(linkto, dest)
161 #bb.warn(c)
162 else:
163 try:
164 os.link(c, dest)
165 except OSError as err:
166 if err.errno == errno.EXDEV:
167 bb.utils.copyfile(c, dest)
168 else:
169 raise
170 return dest
171
172def staging_copydir(c, target, dest, seendirs):
173 if dest not in seendirs:
174 bb.utils.mkdirhier(dest)
175 seendirs.add(dest)
176
177def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
178 import subprocess
179
180 if not fixme:
181 return
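    # How the pipeline below works: each "fixme" file lists the staged files
    # that still contain FIXMESTAGINGDIR*/FIXME_* placeholders. The first sed
    # rewrites those listed paths to live under 'target'; xargs then runs
    # "sed -i" on the files themselves, substituting the real target/native
    # sysroot paths (plus the extra FIXME_<var> values added below).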
182 cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
183 for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
184 fixme_path = d.getVar(fixmevar)
185 cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
186 bb.debug(2, cmd)
187 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
188
189
190def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
191 import glob
192 import subprocess
193 import errno
194
195 fixme = []
196 postinsts = []
197 seendirs = set()
198 stagingdir = d.getVar("STAGING_DIR")
199 if native:
200 pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
201 targetdir = nativesysroot
202 else:
203 pkgarchs = ['${MACHINE_ARCH}']
204 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
205 pkgarchs.append('allarch')
206 targetdir = targetsysroot
207
208 bb.utils.mkdirhier(targetdir)
209 for pkgarch in pkgarchs:
210 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
211 if manifest.endswith("-initial.populate_sysroot"):
212 # skip libgcc-initial due to file overlap
213 continue
214 if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
215 continue
216 if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
217 continue
218 tmanifest = targetdir + "/" + os.path.basename(manifest)
219 if os.path.exists(tmanifest):
220 continue
221 try:
222 os.link(manifest, tmanifest)
223 except OSError as err:
224 if err.errno == errno.EXDEV:
225 bb.utils.copyfile(manifest, tmanifest)
226 else:
227 raise
228 with open(manifest, "r") as f:
229 for l in f:
230 l = l.strip()
231 if l.endswith("/fixmepath"):
232 fixme.append(l)
233 continue
234 if l.endswith("/fixmepath.cmd"):
235 continue
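                # The manifest records absolute paths into the shared
                # component area; stripping the stagingdir prefix and the
                # first three remaining path components (the components
                # directory, architecture and recipe -- illustrative) leaves
                # the sysroot-relative path to recreate under targetdir.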
236 dest = l.replace(stagingdir, "")
237 dest = targetdir + "/" + "/".join(dest.split("/")[3:])
238 if l.endswith("/"):
239 staging_copydir(l, targetdir, dest, seendirs)
240 continue
241 try:
242 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
243 except FileExistsError:
244 continue
245
246 staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
247 for p in postinsts:
248 subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
249
250#
251# Manifests here are complicated. The main sysroot area has the unpacked sstate
252# which is unrelocated and tracked by the main sstate manifests. Each recipe
253# specific sysroot has manifests for each dependency that is installed there.
254# The task hash is used to tell whether the data needs to be reinstalled. We
255# use a symlink to point to the currently installed hash. There is also a
256# "complete" stamp file which is used to mark if installation completed. If
257# something fails (e.g. a postinst), this won't get written and we would
258# remove and reinstall the dependency. This also means partially installed
259# dependencies should get cleaned up correctly.
260#
261
262python extend_recipe_sysroot() {
263 import copy
264 import subprocess
265 import errno
266 import collections
267 import glob
268
269 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
270 mytaskname = d.getVar("BB_RUNTASK")
271 if mytaskname.endswith("_setscene"):
272 mytaskname = mytaskname.replace("_setscene", "")
273 workdir = d.getVar("WORKDIR")
274 #bb.warn(str(taskdepdata))
275 pn = d.getVar("PN")
276 stagingdir = d.getVar("STAGING_DIR")
277 sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
278 recipesysroot = d.getVar("RECIPE_SYSROOT")
279 recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
280
281 # Detect bitbake -b usage
282 nodeps = d.getVar("BB_LIMITEDDEPS") or False
283 if nodeps:
284 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
285 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
286 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
287 bb.utils.unlockfile(lock)
288 return
289
290 start = None
291 configuredeps = []
292 owntaskdeps = []
293 for dep in taskdepdata:
294 data = taskdepdata[dep]
295 if data[1] == mytaskname and data[0] == pn:
296 start = dep
297 elif data[0] == pn:
298 owntaskdeps.append(data[1])
299 if start is None:
300	        bb.fatal("Couldn't find ourselves in BB_TASKDEPDATA?")
301
302 # We need to figure out which sysroot files we need to expose to this task.
303 # This needs to match what would get restored from sstate, which is controlled
304 # ultimately by calls from bitbake to setscene_depvalid().
305 # That function expects a setscene dependency tree. We build a dependency tree
306 # condensed to inter-sstate task dependencies, similar to that used by setscene
307 # tasks. We can then call into setscene_depvalid() and decide
308 # which dependencies we can "see" and should expose in the recipe specific sysroot.
309 setscenedeps = copy.deepcopy(taskdepdata)
310
311 start = set([start])
312
313 sstatetasks = d.getVar("SSTATETASKS").split()
314 # Add recipe specific tasks referenced by setscene_depvalid()
315 sstatetasks.append("do_stash_locale")
316 sstatetasks.append("do_deploy")
317
318 def print_dep_tree(deptree):
319 data = ""
320 for dep in deptree:
321 deps = " " + "\n ".join(deptree[dep][3]) + "\n"
322 data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
323 return data
324
325 #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
326
327 #bb.note(" start2 is %s" % str(start))
328
329 # If start is an sstate task (like do_package) we need to add in its direct dependencies
330 # else the code below won't recurse into them.
331 for dep in set(start):
332 for dep2 in setscenedeps[dep][3]:
333 start.add(dep2)
334 start.remove(dep)
335
336 #bb.note(" start3 is %s" % str(start))
337
338 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
339 for dep in taskdepdata:
340 data = setscenedeps[dep]
341 if data[1] not in sstatetasks:
342 for dep2 in setscenedeps:
343 data2 = setscenedeps[dep2]
344 if dep in data2[3]:
345 data2[3].update(setscenedeps[dep][3])
346 data2[3].remove(dep)
347 if dep in start:
348 start.update(setscenedeps[dep][3])
349 start.remove(dep)
350 del setscenedeps[dep]
351
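    # Illustration with hypothetical recipes: if the raw task tree is
    #   app:do_configure -> app:do_prepare_recipe_sysroot -> libfoo:do_populate_sysroot
    # then do_prepare_recipe_sysroot is not an sstate task, so it is deleted
    # and its dependencies are folded into its parents, leaving the collapsed
    #   app:do_configure -> libfoo:do_populate_sysroot
    # edge that the code below can evaluate via setscene_depvalid().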
352 # Remove circular references
353 for dep in setscenedeps:
354 if dep in setscenedeps[dep][3]:
355 setscenedeps[dep][3].remove(dep)
356
357 #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
358 #bb.note(" start is %s" % str(start))
359
360 # Direct dependencies should be present and can be depended upon
361 for dep in sorted(set(start)):
362 if setscenedeps[dep][1] == "do_populate_sysroot":
363 if dep not in configuredeps:
364 configuredeps.append(dep)
365 bb.note("Direct dependencies are %s" % str(configuredeps))
366 #bb.note(" or %s" % str(start))
367
368 msgbuf = []
369 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
370 # for ones that would be restored from sstate.
371 done = list(start)
372 next = list(start)
373 while next:
374 new = []
375 for dep in next:
376 data = setscenedeps[dep]
377 for datadep in data[3]:
378 if datadep in done:
379 continue
380 taskdeps = {}
381 taskdeps[dep] = setscenedeps[dep][:2]
382 taskdeps[datadep] = setscenedeps[datadep][:2]
383 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
384 if retval:
385 msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
386 continue
387 done.append(datadep)
388 new.append(datadep)
389 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
390 configuredeps.append(datadep)
391 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
392 else:
393 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
394 next = new
395
396	    # This logging is sadly too verbose for day-to-day use
397 #bb.debug(2, "\n".join(msgbuf))
398
399 depdir = recipesysrootnative + "/installeddeps"
400 bb.utils.mkdirhier(depdir)
401 bb.utils.mkdirhier(sharedmanifests)
402
403 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
404
405 fixme = {}
406 seendirs = set()
407 postinsts = []
408 multilibs = {}
409 manifests = {}
410 # All files that we're going to be installing, to find conflicts.
411 fileset = {}
412
413 invalidate_tasks = set()
414 for f in os.listdir(depdir):
415 removed = []
416 if not f.endswith(".complete"):
417 continue
418 f = depdir + "/" + f
419 if os.path.islink(f) and not os.path.exists(f):
420 bb.note("%s no longer exists, removing from sysroot" % f)
421 lnk = os.readlink(f.replace(".complete", ""))
422 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
423 os.unlink(f)
424 os.unlink(f.replace(".complete", ""))
425 removed.append(os.path.basename(f.replace(".complete", "")))
426
427	    # If we've removed files from the sysroot above, the task that installed them may still
428	    # have a stamp file present. That stamp is probably invalid right now but may become
429	    # valid again if, for example, the user changes the configuration back. Since we've removed
430	    # files a task might need, remove the stamp file too to force the task to rerun.
431 # YOCTO #14790
432 if removed:
433 for i in glob.glob(depdir + "/index.*"):
434 if i.endswith("." + mytaskname):
435 continue
436 with open(i, "r") as f:
437 for l in f:
438 if l.startswith("TaskDeps:"):
439 continue
440 l = l.strip()
441 if l in removed:
442 invalidate_tasks.add(i.rsplit(".", 1)[1])
443 break
444 for t in invalidate_tasks:
445 bb.note("Invalidating stamps for task %s" % t)
446 bb.build.clean_stamp(t, d)
447
448 installed = []
449 for dep in configuredeps:
450 c = setscenedeps[dep][0]
451 if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
452 bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
453 continue
454 installed.append(c)
455
456 # We want to remove anything which this task previously installed but is no longer a dependency
457 taskindex = depdir + "/" + "index." + mytaskname
458 if os.path.exists(taskindex):
459 potential = []
460 with open(taskindex, "r") as f:
461 for l in f:
462 l = l.strip()
463 if l not in installed:
464 fl = depdir + "/" + l
465 if not os.path.exists(fl):
466 # Was likely already uninstalled
467 continue
468 potential.append(l)
469 # We need to ensure no other task needs this dependency. We hold the sysroot
470	        # lock, so we can search the indexes to check.
471 if potential:
472 for i in glob.glob(depdir + "/index.*"):
473 if i.endswith("." + mytaskname):
474 continue
475 with open(i, "r") as f:
476 for l in f:
477 if l.startswith("TaskDeps:"):
478 prevtasks = l.split()[1:]
479 if mytaskname in prevtasks:
480	                            # We're a dependency of this task so we can clear items out of the sysroot
481 break
482 l = l.strip()
483 if l in potential:
484 potential.remove(l)
485 for l in potential:
486 fl = depdir + "/" + l
487 bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
488 lnk = os.readlink(fl)
489 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
490 os.unlink(fl)
491 os.unlink(fl + ".complete")
492
493 msg_exists = []
494 msg_adding = []
495
496 # Handle all removals first since files may move between recipes
497 for dep in configuredeps:
498 c = setscenedeps[dep][0]
499 if c not in installed:
500 continue
501 taskhash = setscenedeps[dep][5]
502 taskmanifest = depdir + "/" + c + "." + taskhash
503
504 if os.path.exists(depdir + "/" + c):
505 lnk = os.readlink(depdir + "/" + c)
506 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
507 continue
508 else:
509 bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
510 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
511 os.unlink(depdir + "/" + c)
512 if os.path.lexists(depdir + "/" + c + ".complete"):
513 os.unlink(depdir + "/" + c + ".complete")
514 elif os.path.lexists(depdir + "/" + c):
515 os.unlink(depdir + "/" + c)
516
517 binfiles = {}
518 # Now handle installs
519 for dep in configuredeps:
520 c = setscenedeps[dep][0]
521 if c not in installed:
522 continue
523 taskhash = setscenedeps[dep][5]
524 taskmanifest = depdir + "/" + c + "." + taskhash
525
526 if os.path.exists(depdir + "/" + c):
527 lnk = os.readlink(depdir + "/" + c)
528 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
529 msg_exists.append(c)
530 continue
531
532 msg_adding.append(c)
533
534 os.symlink(c + "." + taskhash, depdir + "/" + c)
535
536 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
537 if d2 is not d:
538 # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
539 # We need a consistent WORKDIR for the image
540 d2.setVar("WORKDIR", d.getVar("WORKDIR"))
541 destsysroot = d2.getVar("RECIPE_SYSROOT")
542 # We put allarch recipes into the default sysroot
543 if manifest and "allarch" in manifest:
544 destsysroot = d.getVar("RECIPE_SYSROOT")
545
546 native = False
547 if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
548 native = True
549
550 if manifest:
551 newmanifest = collections.OrderedDict()
552 targetdir = destsysroot
553 if native:
554 targetdir = recipesysrootnative
555 if targetdir not in fixme:
556 fixme[targetdir] = []
557 fm = fixme[targetdir]
558
559 with open(manifest, "r") as f:
560 manifests[dep] = manifest
561 for l in f:
562 l = l.strip()
563 if l.endswith("/fixmepath"):
564 fm.append(l)
565 continue
566 if l.endswith("/fixmepath.cmd"):
567 continue
568 dest = l.replace(stagingdir, "")
569 dest = "/" + "/".join(dest.split("/")[3:])
570 newmanifest[l] = targetdir + dest
571
572 # Check if files have already been installed by another
573 # recipe and abort if they have, explaining what recipes are
574 # conflicting.
575 hashname = targetdir + dest
576 if not hashname.endswith("/"):
577 if hashname in fileset:
578 bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
579 else:
580 fileset[hashname] = c
581
582	            # Having multiple identical manifests in each sysroot eats disk space, so
583 # create a shared pool of them and hardlink if we can.
584 # We create the manifest in advance so that if something fails during installation,
585	            # or the build is interrupted, subsequent execution can clean up.
586 sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
587 if not os.path.exists(sharedm):
588 smlock = bb.utils.lockfile(sharedm + ".lock")
589	                # This can race. You'd think that just means we may not end up with all copies hardlinked to each other,
590	                # but Python can lose file handles, so we need to do this under a lock.
591 if not os.path.exists(sharedm):
592 with open(sharedm, 'w') as m:
593 for l in newmanifest:
594 dest = newmanifest[l]
595 m.write(dest.replace(workdir + "/", "") + "\n")
596 bb.utils.unlockfile(smlock)
597 try:
598 os.link(sharedm, taskmanifest)
599 except OSError as err:
600 if err.errno == errno.EXDEV:
601 bb.utils.copyfile(sharedm, taskmanifest)
602 else:
603 raise
604 # Finally actually install the files
605 for l in newmanifest:
606 dest = newmanifest[l]
607 if l.endswith("/"):
608 staging_copydir(l, targetdir, dest, seendirs)
609 continue
610 if "/bin/" in l or "/sbin/" in l:
611 # defer /*bin/* files until last in case they need libs
612 binfiles[l] = (targetdir, dest)
613 else:
614 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
615
616 # Handle deferred binfiles
617 for l in binfiles:
618 (targetdir, dest) = binfiles[l]
619 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
620
621 bb.note("Installed into sysroot: %s" % str(msg_adding))
622 bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
623
624 for f in fixme:
625 staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)
626
627 for p in postinsts:
628 subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
629
630 for dep in manifests:
631 c = setscenedeps[dep][0]
632 os.symlink(manifests[dep], depdir + "/" + c + ".complete")
633
634 with open(taskindex, "w") as f:
635 f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
636 for l in sorted(installed):
637 f.write(l + "\n")
638
639 bb.utils.unlockfile(lock)
640}
641extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
642
643do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
644python do_prepare_recipe_sysroot () {
645 bb.build.exec_func("extend_recipe_sysroot", d)
646}
647addtask do_prepare_recipe_sysroot before do_configure after do_fetch
648
649python staging_taskhandler() {
650 bbtasks = e.tasklist
651 for task in bbtasks:
652 deps = d.getVarFlag(task, "depends")
653 if task == "do_configure" or (deps and "populate_sysroot" in deps):
654 d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
655}
656staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
657addhandler staging_taskhandler
658
659
660#
661# Target build output, stored in do_populate_sysroot or do_package, can depend
662# not only upon direct dependencies but also on indirect ones. A good example is
663# linux-libc-headers. The toolchain depends on this but most target recipes do
664# not. Some headers are not used by the toolchain build and do not change the
665# toolchain task output, so those task hashes can change without changing the
666# sysroot output of that recipe, yet they can still influence other recipes.
667#
668# A specific example is rtc.h, which affects rtcwake.c in util-linux but is not
669# used in the glibc or gcc builds. To handle this, we need to include the
670# populate_sysroot hashes of such dependencies in the task output hashes.
671#
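# Illustratively, the extra signature data computed below is one line per
# non-native sysroot dependency of the form "<recipe>: <task hash>", e.g.
# (hypothetical hash value):
#   linux-libc-headers: 4f9a0c4f...
# so a change in any of those hashes perturbs this task's output hash for
# hash equivalence purposes.
#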
672python target_add_sysroot_deps () {
673 current_task = "do_" + d.getVar("BB_CURRENTTASK")
674 if current_task not in ["do_populate_sysroot", "do_package"]:
675 return
676
677 pn = d.getVar("PN")
678 if pn.endswith("-native"):
679 return
680
681 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
682 deps = {}
683 for dep in taskdepdata.values():
684 if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0] and dep[0] != pn:
685 deps[dep[0]] = dep[6]
686
687 d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
688}
689SSTATECREATEFUNCS += "target_add_sysroot_deps"
690
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
deleted file mode 100644
index be3b898b4d..0000000000
--- a/meta/classes/syslinux.bbclass
+++ /dev/null
@@ -1,194 +0,0 @@
1# syslinux.bbclass
2# Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
3# SPDX-License-Identifier: MIT
4
5# Provide syslinux specific functions for building bootable images.
6
7# External variables
8# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
9# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
10# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
11# ${LABELS} - a list of targets for the automatic config
12# ${APPEND} - an override list of append strings for each label
13# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited
14# ${SYSLINUX_SPLASH} - A background for the VGA boot menu if using the boot menu
15# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
16# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
17# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
18# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
19
20do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
21 syslinux-native:do_populate_sysroot"
22
23ISOLINUXDIR ?= "/isolinux"
24SYSLINUXDIR = "/"
25# The kernel has an internal default console, which you can override with
26# a console=...some_tty...
27SYSLINUX_DEFAULT_CONSOLE ?= ""
28SYSLINUX_SERIAL ?= "0 115200"
29SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
30SYSLINUX_PROMPT ?= "0"
31SYSLINUX_TIMEOUT ?= "50"
32AUTO_SYSLINUXMENU ?= "1"
33SYSLINUX_ALLOWOPTIONS ?= "1"
34SYSLINUX_ROOT ?= "${ROOT}"
35SYSLINUX_CFG_VM ?= "${S}/syslinux_vm.cfg"
36SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg"
37APPEND ?= ""
38
39# Need UUID utility code.
40inherit fs-uuid
41
42syslinux_populate() {
43 DEST=$1
44 BOOTDIR=$2
45 CFGNAME=$3
46
47 install -d ${DEST}${BOOTDIR}
48
49 # Install the config files
50 install -m 0644 ${SYSLINUX_CFG} ${DEST}${BOOTDIR}/${CFGNAME}
51 if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
52 install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
53 install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
54 install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
55 if [ "${SYSLINUX_SPLASH}" != "" ] ; then
56 install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
57 fi
58 fi
59}
60
61syslinux_iso_populate() {
62 iso_dir=$1
63 syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
64 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
65 install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
66}
67
68syslinux_hddimg_populate() {
69 hdd_dir=$1
70 syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
71 install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
72}
73
74syslinux_hddimg_install() {
75 syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
76}
77
78python build_syslinux_cfg () {
79 import copy
80 import sys
81
82 workdir = d.getVar('WORKDIR')
83 if not workdir:
84 bb.error("WORKDIR not defined, unable to package")
85 return
86
87 labels = d.getVar('LABELS')
88 if not labels:
89 bb.debug(1, "LABELS not defined, nothing to do")
90 return
91
92 if labels == []:
93 bb.debug(1, "No labels, nothing to do")
94 return
95
96 cfile = d.getVar('SYSLINUX_CFG')
97 if not cfile:
98 bb.fatal('Unable to read SYSLINUX_CFG')
99
100 try:
101 cfgfile = open(cfile, 'w')
102 except OSError:
103 bb.fatal('Unable to open %s' % cfile)
104
105 cfgfile.write('# Automatically created by OE\n')
106
107 opts = d.getVar('SYSLINUX_OPTS')
108
109 if opts:
110 for opt in opts.split(';'):
111 cfgfile.write('%s\n' % opt)
112
113 allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS')
114 if allowoptions:
115 cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions)
116 else:
117 cfgfile.write('ALLOWOPTIONS 1\n')
118
119 syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE')
120 syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY')
121 syslinux_serial = d.getVar('SYSLINUX_SERIAL')
122 if syslinux_serial:
123 cfgfile.write('SERIAL %s\n' % syslinux_serial)
124
125 menu = (d.getVar('AUTO_SYSLINUXMENU') == "1")
126
127 if menu and syslinux_serial:
128 cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
129 else:
130 cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
131
132 timeout = d.getVar('SYSLINUX_TIMEOUT')
133
134 if timeout:
135 cfgfile.write('TIMEOUT %s\n' % timeout)
136 else:
137 cfgfile.write('TIMEOUT 50\n')
138
139 prompt = d.getVar('SYSLINUX_PROMPT')
140 if prompt:
141 cfgfile.write('PROMPT %s\n' % prompt)
142 else:
143 cfgfile.write('PROMPT 1\n')
144
145 if menu:
146 cfgfile.write('ui vesamenu.c32\n')
147 cfgfile.write('menu title Select kernel options and boot kernel\n')
148 cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
149 splash = d.getVar('SYSLINUX_SPLASH')
150 if splash:
151 cfgfile.write('menu background splash.lss\n')
152
153 for label in labels.split():
154 localdata = bb.data.createCopy(d)
155
156 overrides = localdata.getVar('OVERRIDES')
157 if not overrides:
158 bb.fatal('OVERRIDES not defined')
159
160 localdata.setVar('OVERRIDES', label + ':' + overrides)
161
162 btypes = [ [ "", syslinux_default_console ] ]
163 if menu and syslinux_serial:
164 btypes = [ [ "Graphics console ", syslinux_default_console ],
165 [ "Serial console ", syslinux_serial_tty ] ]
166
167	    root = d.getVar('SYSLINUX_ROOT')
168 if not root:
169 bb.fatal('SYSLINUX_ROOT not defined')
170
171 kernel = localdata.getVar('KERNEL_IMAGETYPE')
172 for btype in btypes:
173 cfgfile.write('LABEL %s%s\nKERNEL /%s\n' % (btype[0], label, kernel))
174
175 exargs = d.getVar('SYSLINUX_KERNEL_ARGS')
176 if exargs:
177 btype[1] += " " + exargs
178
179 append = localdata.getVar('APPEND')
180 initrd = localdata.getVar('INITRD')
181
182 append = root + " " + append
183 cfgfile.write('APPEND ')
184
185 if initrd:
186 cfgfile.write('initrd=/initrd ')
187
188	            cfgfile.write('LABEL=%s ' % label)
189 append = replace_rootfs_uuid(d, append)
190 cfgfile.write('%s %s\n' % (append, btype[1]))
191
192 cfgfile.close()
193}
194build_syslinux_cfg[dirs] = "${S}"
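Illustratively, with the defaults above and hypothetical values LABELS = "boot", KERNEL_IMAGETYPE = "bzImage" and SYSLINUX_ROOT = "root=/dev/sda2", build_syslinux_cfg would emit a configuration along these lines:

# Automatically created by OE
ALLOWOPTIONS 1
SERIAL 0 115200
DEFAULT Graphics console boot
TIMEOUT 50
PROMPT 0
ui vesamenu.c32
menu title Select kernel options and boot kernel
menu tabmsg Press [Tab] to edit, [Return] to select
LABEL Graphics console boot
KERNEL /bzImage
APPEND LABEL=boot root=/dev/sda2
LABEL Serial console boot
KERNEL /bzImage
APPEND LABEL=boot root=/dev/sda2 console=ttyS0,115200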
diff --git a/meta/classes/systemd-boot-cfg.bbclass b/meta/classes/systemd-boot-cfg.bbclass
deleted file mode 100644
index 366dd23738..0000000000
--- a/meta/classes/systemd-boot-cfg.bbclass
+++ /dev/null
@@ -1,77 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
8SYSTEMD_BOOT_ENTRIES ?= ""
9SYSTEMD_BOOT_TIMEOUT ?= "10"
10
11# Uses MACHINE specific KERNEL_IMAGETYPE
12PACKAGE_ARCH = "${MACHINE_ARCH}"
13
14# Need UUID utility code.
15inherit fs-uuid
16
17python build_efi_cfg() {
18 s = d.getVar("S")
19 labels = d.getVar('LABELS')
20 if not labels:
21 bb.debug(1, "LABELS not defined, nothing to do")
22 return
23
24 if labels == []:
25 bb.debug(1, "No labels, nothing to do")
26 return
27
28 cfile = d.getVar('SYSTEMD_BOOT_CFG')
29 cdir = os.path.dirname(cfile)
30 if not os.path.exists(cdir):
31 os.makedirs(cdir)
32 try:
33 cfgfile = open(cfile, 'w')
34 except OSError:
35 bb.fatal('Unable to open %s' % cfile)
36
37 cfgfile.write('# Automatically created by OE\n')
38 cfgfile.write('default %s\n' % (labels.split()[0]))
39 timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
40 if timeout:
41 cfgfile.write('timeout %s\n' % timeout)
42 else:
43 cfgfile.write('timeout 10\n')
44 cfgfile.close()
45
46 for label in labels.split():
47 localdata = d.createCopy()
48
49 entryfile = "%s/%s.conf" % (s, label)
50 if not os.path.exists(s):
51 os.makedirs(s)
52 d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
53 try:
54 entrycfg = open(entryfile, "w")
55 except OSError:
56 bb.fatal('Unable to open %s' % entryfile)
57
58 entrycfg.write('title %s\n' % label)
59
60 kernel = localdata.getVar("KERNEL_IMAGETYPE")
61 entrycfg.write('linux /%s\n' % kernel)
62
63 append = localdata.getVar('APPEND')
64 initrd = localdata.getVar('INITRD')
65
66 if initrd:
67 entrycfg.write('initrd /initrd\n')
68 lb = label
69 if label == "install":
70 lb = "install-efi"
71 entrycfg.write('options LABEL=%s ' % lb)
72 if append:
73 append = replace_rootfs_uuid(d, append)
74 entrycfg.write('%s' % append)
75 entrycfg.write('\n')
76 entrycfg.close()
77}
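Illustratively, with hypothetical values LABELS = "boot", KERNEL_IMAGETYPE = "bzImage" and APPEND = "root=/dev/sda2", build_efi_cfg writes a loader.conf such as:

# Automatically created by OE
default boot
timeout 10

and a per-label entry file boot.conf such as (an "initrd /initrd" line would be added if INITRD were set):

title boot
linux /bzImage
options LABEL=boot root=/dev/sda2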
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
deleted file mode 100644
index 5aa32dd997..0000000000
--- a/meta/classes/systemd-boot.bbclass
+++ /dev/null
@@ -1,35 +0,0 @@
1# Copyright (C) 2016 Intel Corporation
2#
3# SPDX-License-Identifier: MIT
4
5# systemd-boot.bbclass - systemd-boot is essentially gummiboot merged into systemd.
6# The original standalone gummiboot project is dead and no longer
7# maintained.
8#
9# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
10# (images built by image-live.bbclass)
11
12do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
13
14require conf/image-uefi.conf
15# Need UUID utility code.
16inherit fs-uuid
17
18efi_populate() {
19 efi_populate_common "$1" systemd
20
21 # systemd-boot requires these paths for configuration files
22 # they are not customizable so no point in new vars
23 install -d ${DEST}/loader
24 install -d ${DEST}/loader/entries
25 install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
26 for i in ${SYSTEMD_BOOT_ENTRIES}; do
27 install -m 0644 ${i} ${DEST}/loader/entries
28 done
29}
30
31efi_iso_populate:append() {
32 cp -r $iso_dir/loader ${EFIIMGDIR}
33}
34
35inherit systemd-boot-cfg
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
deleted file mode 100644
index f6564c2b31..0000000000
--- a/meta/classes/systemd.bbclass
+++ /dev/null
@@ -1,239 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The list of packages that should have systemd packaging scripts added. For
8# each entry, optionally have a SYSTEMD_SERVICE:[package] that lists the service
9# files in this package. If this variable isn't set, [package].service is used.
10SYSTEMD_PACKAGES ?= "${PN}"
11SYSTEMD_PACKAGES:class-native ?= ""
12SYSTEMD_PACKAGES:class-nativesdk ?= ""
13
14# Whether to enable or disable the services on installation.
15SYSTEMD_AUTO_ENABLE ??= "enable"
16
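# Typical usage in a recipe (illustrative unit name):
#
#   inherit systemd
#   SYSTEMD_SERVICE:${PN} = "myapp.service"
#   SYSTEMD_AUTO_ENABLE = "enable"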
17# This class will be included in any recipe that supports systemd init scripts,
18# even if systemd is not in DISTRO_FEATURES. As such, don't make any changes
19# unconditionally; check DISTRO_FEATURES first.
20python __anonymous() {
21 # If the distro features have systemd but not sysvinit, inhibit update-rcd
22 # from doing any work so that pure-systemd images don't have redundant init
23 # files.
24 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
25 d.appendVar("DEPENDS", " systemd-systemctl-native")
26 d.appendVar("PACKAGE_WRITE_DEPS", " systemd-systemctl-native")
27 if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
28 d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
29}
30
31systemd_postinst() {
32if systemctl >/dev/null 2>/dev/null; then
33 OPTS=""
34
35 if [ -n "$D" ]; then
36 OPTS="--root=$D"
37 fi
38
39 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
40 for service in ${SYSTEMD_SERVICE_ESCAPED}; do
41 systemctl ${OPTS} enable "$service"
42 done
43 fi
44
45 if [ -z "$D" ]; then
46 systemctl daemon-reload
47 systemctl preset ${SYSTEMD_SERVICE_ESCAPED}
48
49 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
50 systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
51 fi
52 fi
53fi
54}
55
56systemd_prerm() {
57if systemctl >/dev/null 2>/dev/null; then
58 if [ -z "$D" ]; then
59 systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
60
61 systemctl disable ${SYSTEMD_SERVICE_ESCAPED}
62 fi
63fi
64}
65
66
67systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
68systemd_populate_packages[vardepsexclude] += "OVERRIDES"
69
70
71python systemd_populate_packages() {
72 import re
73 import shlex
74
75 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
76 return
77
78 def get_package_var(d, var, pkg):
79 val = (d.getVar('%s:%s' % (var, pkg)) or "").strip()
80 if val == "":
81 val = (d.getVar(var) or "").strip()
82 return val
83
84 # Check if systemd-packages already included in PACKAGES
85 def systemd_check_package(pkg_systemd):
86 packages = d.getVar('PACKAGES')
87	        if pkg_systemd not in packages.split():
88 bb.error('%s does not appear in package list, please add it' % pkg_systemd)
89
90
91 def systemd_generate_package_scripts(pkg):
92 bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
93
94 paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE:' + pkg).split())
95 d.setVar('SYSTEMD_SERVICE_ESCAPED:' + pkg, paths_escaped)
96
97 # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE:pkg
98 # variable.
99 localdata = d.createCopy()
100 localdata.prependVar("OVERRIDES", pkg + ":")
101
102 postinst = d.getVar('pkg_postinst:%s' % pkg)
103 if not postinst:
104 postinst = '#!/bin/sh\n'
105 postinst += localdata.getVar('systemd_postinst')
106 d.setVar('pkg_postinst:%s' % pkg, postinst)
107
108 prerm = d.getVar('pkg_prerm:%s' % pkg)
109 if not prerm:
110 prerm = '#!/bin/sh\n'
111 prerm += localdata.getVar('systemd_prerm')
112 d.setVar('pkg_prerm:%s' % pkg, prerm)
113
114
115 # Add files to FILES:*-systemd if existent and not already done
116 def systemd_append_file(pkg_systemd, file_append):
117 appended = False
118 if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
119 var_name = "FILES:" + pkg_systemd
120 files = d.getVar(var_name, False) or ""
121 if file_append not in files.split():
122 d.appendVar(var_name, " " + file_append)
123 appended = True
124 return appended
125
126	    # Add systemd files to FILES:*-systemd, parse for Also= and follow recursively
127 def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
128 # avoid infinite recursion
129 if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
130 fullpath = oe.path.join(d.getVar("D"), path, service)
131 if service.find('.service') != -1:
132 # for *.service add *@.service
133 service_base = service.replace('.service', '')
134 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
135 if service.find('.socket') != -1:
136 # for *.socket add *.service and *@.service
137 service_base = service.replace('.socket', '')
138 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
139 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
140 for key in keys.split():
141	            # recurse over all dependencies found in keys ('Also', 'Conflicts', ...) and add them to FILES
142 cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, shlex.quote(fullpath), key)
143 pipe = os.popen(cmd, 'r')
144 line = pipe.readline()
145 while line:
146 line = line.replace('\n', '')
147 systemd_add_files_and_parse(pkg_systemd, path, line, keys)
148 line = pipe.readline()
149 pipe.close()
150
151 # Check service-files and call systemd_add_files_and_parse for each entry
152 def systemd_check_services():
153 searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
154 searchpaths.append(d.getVar("systemd_system_unitdir"))
155 systemd_packages = d.getVar('SYSTEMD_PACKAGES')
156
157 keys = 'Also'
158 # scan for all in SYSTEMD_SERVICE[]
159 for pkg_systemd in systemd_packages.split():
160 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
161 path_found = ''
162
163 # Deal with adding, for example, 'ifplugd@eth0.service' from
164 # 'ifplugd@.service'
165 base = None
166 at = service.find('@')
167 if at != -1:
168 ext = service.rfind('.')
169 base = service[:at] + '@' + service[ext:]
170
171 for path in searchpaths:
172 if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
173 path_found = path
174 break
175 elif base is not None:
176 if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
177 path_found = path
178 break
179
180 if path_found != '':
181 systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
182 else:
183 bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format(
184 service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
185
186 def systemd_create_presets(pkg, action):
187 presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
188 bb.utils.mkdirhier(os.path.dirname(presetf))
189 with open(presetf, 'a') as fd:
190 for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split():
191 fd.write("%s %s\n" % (action,service))
192 d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
193
194 # Run all modifications once when creating package
195 if os.path.exists(d.getVar("D")):
196 for pkg in d.getVar('SYSTEMD_PACKAGES').split():
197 systemd_check_package(pkg)
198 if d.getVar('SYSTEMD_SERVICE:' + pkg):
199 systemd_generate_package_scripts(pkg)
200 action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
201 if action in ("enable", "disable"):
202 systemd_create_presets(pkg, action)
203 elif action not in ("mask", "preset"):
204 bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
205 systemd_check_services()
206}
207
208PACKAGESPLITFUNCS:prepend = "systemd_populate_packages "
209
210python rm_systemd_unitdir (){
211 import shutil
212 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
213 systemd_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_unitdir'))
214 if os.path.exists(systemd_unitdir):
215 shutil.rmtree(systemd_unitdir)
216 systemd_libdir = os.path.dirname(systemd_unitdir)
217 if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
218 os.rmdir(systemd_libdir)
219}
220
221python rm_sysvinit_initddir (){
222 import shutil
223 sysv_initddir = oe.path.join(d.getVar("D"), (d.getVar('INIT_D_DIR') or "/etc/init.d"))
224
225 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
226 not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
227 os.path.exists(sysv_initddir):
228 systemd_system_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_system_unitdir'))
229
230 # If systemd_system_unitdir contains anything, delete sysv_initddir
231 if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
232 shutil.rmtree(sysv_initddir)
233}
234
235do_install[postfuncs] += "${RMINITDIR} "
236RMINITDIR:class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
237RMINITDIR:class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
238RMINITDIR = ""
239
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
deleted file mode 100644
index 8d2fab21df..0000000000
--- a/meta/classes/testimage.bbclass
+++ /dev/null
@@ -1,508 +0,0 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# SPDX-License-Identifier: MIT
4
5inherit metadata_scm
6inherit image-artifact-names
7
8# testimage.bbclass enables testing of qemu images using Python unittests.
9# Most of the tests are commands run on the target image over ssh.
10# To use it, add testimage to the global inherits and build your target image with -c testimage
11# You can try it out like this:
12# - first add IMAGE_CLASSES += "testimage" in local.conf
13# - build a qemu core-image-sato
14# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
15#
16# The tests can be run automatically each time an image is built if you set
17# TESTIMAGE_AUTO = "1"
18
19TESTIMAGE_AUTO ??= "0"
20
21# You can set (or append to) TEST_SUITES in local.conf to select the tests
22# which you want to run for your target.
23# The test names are the module names in meta/lib/oeqa/runtime/cases.
24# Each name in TEST_SUITES represents a required test for the image. (no skipping allowed)
25# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
26# Note that order in TEST_SUITES is relevant: tests are run in an order such that
27# tests mentioned in @skipUnlessPassed run before the tests that depend on them,
28# but without such dependencies, tests run in the order in which they are listed
29# in TEST_SUITES.
30#
31# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
32
33# TEST_LOG_DIR contains an ssh command log and may contain information about which command is running, its output and return codes, and, for qemu, a boot log up to login.
34# Booting is handled by this class, and it's not a test in itself.
35# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
36# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
37# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of RAM to 1 GB.
38# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
39# QEMU_USE_KVM can be set to "" to disable the use of kvm (by default it is enabled if target_arch == build_arch or both of them are x86 archs)
40
41# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting,
42# if a pattern is not specifically present on this variable a default will be used when booting the target.
43# TESTIMAGE_BOOT_PATTERNS[<flag>] overrides the pattern used for that specific flag, where flag comes from a list of accepted flags
44# e.g. normally the system boots and waits for a login prompt (login:), after that it sends the command: "root\n" to log as the root user
45# if we wanted to log in as the hypothetical "webserver" user for example we could set the following:
46# TESTIMAGE_BOOT_PATTERNS = "send_login_user search_login_succeeded"
47# TESTIMAGE_BOOT_PATTERNS[send_login_user] = "webserver\n"
48# TESTIMAGE_BOOT_PATTERNS[search_login_succeeded] = "webserver@[a-zA-Z0-9\-]+:~#"
49# The accepted flags are the following: search_reached_prompt, send_login_user, search_login_succeeded, search_cmd_finished.
50# They are prefixed with either search/send to indicate whether the pattern is sent to, or searched for in, the target terminal.
51
52TEST_LOG_DIR ?= "${WORKDIR}/testimage"
53
54TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
55TEST_INSTALL_TMP_DIR ?= "${WORKDIR}/testimage/install_tmp"
56TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages"
57TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
58TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
59
60BASICTESTSUITE = "\
61 ping date df ssh scp python perl gi ptest parselogs \
62 logrotate connman systemd oe_syslog pam stap ldd xorg \
63 kernelmodule gcc buildcpio buildlzip buildgalculator \
64 dnf rpm opkg apt weston go rust"
65
66DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
67
68# musl doesn't support systemtap
69DEFAULT_TEST_SUITES:remove:libc-musl = "stap"
70
71# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
72# mitigate this by removing build tests for qemumips machines.
73MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
74DEFAULT_TEST_SUITES:remove:qemumips = "${MIPSREMOVE}"
75DEFAULT_TEST_SUITES:remove:qemumips64 = "${MIPSREMOVE}"
76
77TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
78
79QEMU_USE_KVM ?= "1"
80TEST_QEMUBOOT_TIMEOUT ?= "1000"
81TEST_OVERALL_TIMEOUT ?= ""
82TEST_TARGET ?= "qemu"
83TEST_QEMUPARAMS ?= ""
84TEST_RUNQEMUPARAMS ?= ""
85
86TESTIMAGE_BOOT_PATTERNS ?= ""
87
88TESTIMAGEDEPENDS = ""
89TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
90TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
91TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
92TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
93TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}"
94TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
95
96TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
97TESTIMAGELOCK:qemuall = ""
98
99TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
100
101TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
102
103testimage_dump_target () {
104 top -bn1
105 ps
106 free
107 df
108 # The next command will export the default gateway IP
109 export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
110 ping -c3 $DEFAULT_GATEWAY
111 dmesg
112 netstat -an
113 ip address
114	    # The next command will dump logs from /var/log/
115 find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
116}
117
118testimage_dump_host () {
119 top -bn1
120 iostat -x -z -N -d -p ALL 20 2
121 ps -ef
122 free
123 df
124 memstat
125 dmesg
126 ip -s link
127 netstat -an
128}
129
130testimage_dump_monitor () {
131 query-status
132 query-block
133 dump-guest-memory {"paging":false,"protocol":"file:%s.img"}
134}
135
136python do_testimage() {
137 testimage_main(d)
138}
139
140addtask testimage
141do_testimage[nostamp] = "1"
142do_testimage[network] = "1"
143do_testimage[depends] += "${TESTIMAGEDEPENDS}"
144do_testimage[lockfiles] += "${TESTIMAGELOCK}"
145
146def testimage_sanity(d):
147 if (d.getVar('TEST_TARGET') == 'simpleremote'
148 and (not d.getVar('TEST_TARGET_IP')
149 or not d.getVar('TEST_SERVER_IP'))):
150 bb.fatal('When TEST_TARGET is set to "simpleremote" '
151 'TEST_TARGET_IP and TEST_SERVER_IP are needed too.')
152
153def get_testimage_configuration(d, test_type, machine):
154 import platform
155 from oeqa.utils.metadata import get_layers
156 configuration = {'TEST_TYPE': test_type,
157 'MACHINE': machine,
158 'DISTRO': d.getVar("DISTRO"),
159 'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
160 'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
161 'STARTTIME': d.getVar("DATETIME"),
162 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
163 'LAYERS': get_layers(d.getVar("BBLAYERS"))}
164 return configuration
165get_testimage_configuration[vardepsexclude] = "DATETIME"
166
167def get_testimage_json_result_dir(d):
168 json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
169 custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
170 if custom_json_result_dir:
171 json_result_dir = custom_json_result_dir
172 return json_result_dir
173
174def get_testimage_result_id(configuration):
175 return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
176
177def get_testimage_boot_patterns(d):
178 from collections import defaultdict
179 boot_patterns = defaultdict(str)
180 # Only accept certain values
181 accepted_patterns = ['search_reached_prompt', 'send_login_user', 'search_login_succeeded', 'search_cmd_finished']
182	    # Not all patterns need to be overridden, e.g. perhaps we only want to change the user
183 boot_patterns_flags = d.getVarFlags('TESTIMAGE_BOOT_PATTERNS') or {}
184 if boot_patterns_flags:
185 patterns_set = [p for p in boot_patterns_flags.items() if p[0] in d.getVar('TESTIMAGE_BOOT_PATTERNS').split()]
186 for flag, flagval in patterns_set:
187 if flag not in accepted_patterns:
188 bb.fatal('Testimage: The only accepted boot patterns are: search_reached_prompt,send_login_user, \
189 search_login_succeeded,search_cmd_finished\n Make sure your TESTIMAGE_BOOT_PATTERNS=%s \
190 contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS'))
191 return
192 # We know boot prompt is searched through in binary format, others might be expressions
193 if flag == 'search_reached_prompt':
194 boot_patterns[flag] = flagval.encode()
195 else:
196 boot_patterns[flag] = flagval.encode().decode('unicode-escape')
197 return boot_patterns
198
199
200def testimage_main(d):
201 import os
202 import json
203 import signal
204 import logging
205 import shutil
206
207 from bb.utils import export_proxies
208 from oeqa.runtime.context import OERuntimeTestContext
209 from oeqa.runtime.context import OERuntimeTestContextExecutor
210 from oeqa.core.target.qemu import supported_fstypes
211 from oeqa.core.utils.test import getSuiteCases
212 from oeqa.utils import make_logger_bitbake_compatible
213
214 def sigterm_exception(signum, stackframe):
215 """
216 Catch SIGTERM from worker in order to stop qemu.
217 """
218 os.kill(os.getpid(), signal.SIGINT)
219
220 def handle_test_timeout(timeout):
221 bb.warn("Global test timeout reached (%s seconds), stopping the tests." %(timeout))
222 os.kill(os.getpid(), signal.SIGINT)
223
224 testimage_sanity(d)
225
226 if (d.getVar('IMAGE_PKGTYPE') == 'rpm'
227 and ('dnf' in d.getVar('TEST_SUITES') or 'auto' in d.getVar('TEST_SUITES'))):
228 create_rpm_index(d)
229
230 logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
231 pn = d.getVar("PN")
232
233 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR"))
234
235 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
236 d.getVar('IMAGE_LINK_NAME')))
237
238 tdname = "%s.testdata.json" % image_name
239 try:
240 with open(tdname, "r") as f:
241 td = json.load(f)
242 except FileNotFoundError as err:
243 bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
244
245	    # Some variables (mostly paths) need to be updated with the values
246	    # from the current environment because some tests require them.
247 for var in d.getVar('TESTIMAGE_UPDATE_VARS').split():
248 td[var] = d.getVar(var)
249
250 image_manifest = "%s.manifest" % image_name
251 image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
252
253 extract_dir = d.getVar("TEST_EXTRACTED_DIR")
254
255 # Get machine
256 machine = d.getVar("MACHINE")
257
258 # Get rootfs
259 fstypes = d.getVar('IMAGE_FSTYPES').split()
260 if d.getVar("TEST_TARGET") == "qemu":
261 fstypes = [fs for fs in fstypes if fs in supported_fstypes]
262 if not fstypes:
263 bb.fatal('Unsupported image type built. Add a compatible image to '
264 'IMAGE_FSTYPES. Supported types: %s' %
265 ', '.join(supported_fstypes))
266 qfstype = fstypes[0]
267 qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
268 if qdeffstype:
269 qfstype = qdeffstype
270 rootfs = '%s.%s' % (image_name, qfstype)
271
272 # Get tmpdir (not really used, just for compatibility)
273 tmpdir = d.getVar("TMPDIR")
274
275 # Get deploy_dir_image (not really used, just for compatibility)
276 dir_image = d.getVar("DEPLOY_DIR_IMAGE")
277
278 # Get bootlog
279 bootlog = os.path.join(d.getVar("TEST_LOG_DIR"),
280 'qemu_boot_log.%s' % d.getVar('DATETIME'))
281
282 # Get display
283 display = d.getVar("BB_ORIGENV").getVar("DISPLAY")
284
285 # Get kernel
286 kernel_name = ('%s-%s.bin' % (d.getVar("KERNEL_IMAGETYPE"), machine))
287 kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), kernel_name)
288
289 # Get boottime
290 boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT"))
291
292 # Get use_kvm
293 kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
294
295 # Get OVMF
296 ovmf = d.getVar("QEMU_USE_OVMF")
297
298 slirp = False
299 if d.getVar("QEMU_USE_SLIRP"):
300 slirp = True
301
302	    # TODO: We use the current implementation of the qemu runner because of
303	    # time constraints; the qemu runner really needs a refactor too.
304 target_kwargs = { 'machine' : machine,
305 'rootfs' : rootfs,
306 'tmpdir' : tmpdir,
307 'dir_image' : dir_image,
308 'display' : display,
309 'kernel' : kernel,
310 'boottime' : boottime,
311 'bootlog' : bootlog,
312 'kvm' : kvm,
313 'slirp' : slirp,
314 'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
315 'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()),
316 'ovmf' : ovmf,
317 'tmpfsdir' : d.getVar("RUNQEMU_TMPFS_DIR"),
318 }
319
320 if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
321 target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
322
323 # hardware controlled targets might need further access
324 target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
325 target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
326 target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
327 target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
328 target_kwargs['testimage_dump_monitor'] = d.getVar("testimage_dump_monitor") or ""
329 target_kwargs['testimage_dump_target'] = d.getVar("testimage_dump_target") or ""
330
331 def export_ssh_agent(d):
332 import os
333
334 variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
335 for v in variables:
336 if v not in os.environ.keys():
337 val = d.getVar(v)
338 if val is not None:
339 os.environ[v] = val
340
341 export_ssh_agent(d)
342
343	    # runtime tests use the network to download projects to build
344 export_proxies(d)
345
346 # we need the host dumper in test context
347 host_dumper = OERuntimeTestContextExecutor.getHostDumper(
348 d.getVar("testimage_dump_host"),
349 d.getVar("TESTIMAGE_DUMP_DIR"))
350
351 # the robot dance
352 target = OERuntimeTestContextExecutor.getTarget(
353 d.getVar("TEST_TARGET"), logger, d.getVar("TEST_TARGET_IP"),
354 d.getVar("TEST_SERVER_IP"), **target_kwargs)
355
356 # test context
357 tc = OERuntimeTestContext(td, logger, target, host_dumper,
358 image_packages, extract_dir)
359
360 # Load tests before starting the target
361 test_paths = get_runtime_paths(d)
362 test_modules = d.getVar('TEST_SUITES').split()
363 if not test_modules:
364 bb.fatal('Empty test suite, please verify TEST_SUITES variable')
365
366 tc.loadTests(test_paths, modules=test_modules)
367
368 suitecases = getSuiteCases(tc.suites)
369 if not suitecases:
370 bb.fatal('Empty test suite, please verify TEST_SUITES variable')
371 else:
372 bb.debug(2, 'test suites:\n\t%s' % '\n\t'.join([str(c) for c in suitecases]))
373
374 package_extraction(d, tc.suites)
375
376 results = None
377 complete = False
378 orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
379 try:
380 # We need to check if runqemu ends unexpectedly
381	    # or if the worker sends us a SIGTERM
382 tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS"))
383 import threading
384 try:
385 threading.Timer(int(d.getVar("TEST_OVERALL_TIMEOUT")), handle_test_timeout, (int(d.getVar("TEST_OVERALL_TIMEOUT")),)).start()
386 except ValueError:
387 pass
388 results = tc.runTests()
389 complete = True
390 except (KeyboardInterrupt, BlockingIOError) as err:
391 if isinstance(err, KeyboardInterrupt):
392 bb.error('testimage interrupted, shutting down...')
393 else:
394 bb.error('runqemu failed, shutting down...')
395 if results:
396 results.stop()
397 results = tc.results
398 finally:
399 signal.signal(signal.SIGTERM, orig_sigterm_handler)
400 tc.target.stop()
401
402 # Show results (if we have them)
403 if results:
404 configuration = get_testimage_configuration(d, 'runtime', machine)
405 results.logDetails(get_testimage_json_result_dir(d),
406 configuration,
407 get_testimage_result_id(configuration),
408 dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
409 results.logSummary(pn)
410
411 # Copy additional logs to tmp/log/oeqa so it's easier to find them
412 targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
413 os.makedirs(targetdir, exist_ok=True)
414 os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
415 os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
416
417 if not results or not complete:
418 bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
419 if not results.wasSuccessful():
420 bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
421
422def get_runtime_paths(d):
423 """
424    Returns a list of paths where runtime tests must reside.
425
426 Runtime tests are expected in <LAYER_DIR>/lib/oeqa/runtime/cases/
427 """
428 paths = []
429
430 for layer in d.getVar('BBLAYERS').split():
431 path = os.path.join(layer, 'lib/oeqa/runtime/cases')
432 if os.path.isdir(path):
433 paths.append(path)
434 return paths
435
436def create_index(arg):
437 import subprocess
438
439 index_cmd = arg
440 try:
441 bb.note("Executing '%s' ..." % index_cmd)
442 result = subprocess.check_output(index_cmd,
443 stderr=subprocess.STDOUT,
444 shell=True)
445 result = result.decode('utf-8')
446 except subprocess.CalledProcessError as e:
447 return("Index creation command '%s' failed with return code "
448 '%d:\n%s' % (e.cmd, e.returncode, e.output.decode("utf-8")))
449 if result:
450 bb.note(result)
451 return None
452
453def create_rpm_index(d):
454 import glob
455 # Index RPMs
456 rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c")
457 index_cmds = []
458 archs = (d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or '').replace('-', '_')
459
460 for arch in archs.split():
461 rpm_dir = os.path.join(d.getVar('DEPLOY_DIR_RPM'), arch)
462 idx_path = os.path.join(d.getVar('WORKDIR'), 'oe-testimage-repo', arch)
463
464 if not os.path.isdir(rpm_dir):
465 continue
466
467 lockfilename = os.path.join(d.getVar('DEPLOY_DIR_RPM'), 'rpm.lock')
468 lf = bb.utils.lockfile(lockfilename, False)
469 oe.path.copyhardlinktree(rpm_dir, idx_path)
470 # Full indexes overload a 256MB image so reduce the number of rpms
471 # in the feed by filtering to specific packages needed by the tests.
472 package_list = glob.glob(idx_path + "*/*.rpm")
473
474 for pkg in package_list:
475        if os.path.basename(pkg).startswith("curl-ptest"):
476 bb.utils.remove(pkg)
477
478 if not os.path.basename(pkg).startswith(("rpm", "run-postinsts", "busybox", "bash", "update-alternatives", "libc6", "curl", "musl")):
479 bb.utils.remove(pkg)
480
481 bb.utils.unlockfile(lf)
482 cmd = '%s --update -q %s' % (rpm_createrepo, idx_path)
483
484 # Create repodata
485 result = create_index(cmd)
486 if result:
487        bb.fatal(result)
488
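# A sketch of the per-arch index command assembled above (path illustrative):
#
#   createrepo_c --update -q ${WORKDIR}/oe-testimage-repo/core2_64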
489def package_extraction(d, test_suites):
490 from oeqa.utils.package_manager import find_packages_to_extract
491 from oeqa.utils.package_manager import extract_packages
492
493 bb.utils.remove(d.getVar("TEST_NEEDED_PACKAGES_DIR"), recurse=True)
494 packages = find_packages_to_extract(test_suites)
495 if packages:
496 bb.utils.mkdirhier(d.getVar("TEST_INSTALL_TMP_DIR"))
497 bb.utils.mkdirhier(d.getVar("TEST_PACKAGED_DIR"))
498 bb.utils.mkdirhier(d.getVar("TEST_EXTRACTED_DIR"))
499 extract_packages(d, packages)
500
501testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME"
502
503python () {
504 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
505 bb.build.addtask("testimage", "do_build", "do_image_complete", d)
506}
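# A minimal local.conf sketch for triggering this automatically (assumes the
# image recipe inherits this class, e.g. via IMAGE_CLASSES):
#
#   IMAGE_CLASSES += "testimage"
#   TESTIMAGE_AUTO = "1"
#   TEST_SUITES = "ping ssh"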
507
508inherit testsdk
diff --git a/meta/classes/testsdk.bbclass b/meta/classes/testsdk.bbclass
deleted file mode 100644
index fd82e6ef41..0000000000
--- a/meta/classes/testsdk.bbclass
+++ /dev/null
@@ -1,52 +0,0 @@
1# Copyright (C) 2013 - 2016 Intel Corporation
2#
3# SPDX-License-Identifier: MIT
4
5# testsdk.bbclass enables testing for SDK and Extensible SDK
6#
7# To run SDK tests, run the commands:
8# $ bitbake <image-name> -c populate_sdk
9# $ bitbake <image-name> -c testsdk
10#
11# To run eSDK tests, run the commands:
12# $ bitbake <image-name> -c populate_sdk_ext
13# $ bitbake <image-name> -c testsdkext
14#
15# where "<image-name>" is an image like core-image-sato.
16
17TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK"
18TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt"
19
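# A hedged sketch: a layer can point these at its own implementation
# (the module path below is hypothetical):
#
#   TESTSDK_CLASS_NAME = "oeqa.sdk.testmysdk.TestMySDK"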
20def import_and_run(name, d):
21 import importlib
22
23 class_name = d.getVar(name)
24 if class_name:
25 module, cls = class_name.rsplit('.', 1)
26 m = importlib.import_module(module)
27 c = getattr(m, cls)()
28 c.run(d)
29 else:
30 bb.warn('No tests were run because %s did not define a class' % name)
31
32import_and_run[vardepsexclude] = "DATETIME BB_ORIGENV"
33
34python do_testsdk() {
35 import_and_run('TESTSDK_CLASS_NAME', d)
36}
37addtask testsdk
38do_testsdk[nostamp] = "1"
39do_testsdk[network] = "1"
40
41python do_testsdkext() {
42 import_and_run('TESTSDKEXT_CLASS_NAME', d)
43}
44addtask testsdkext
45do_testsdkext[nostamp] = "1"
46do_testsdkext[network] = "1"
47
48python () {
49 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
50 bb.build.addtask("testsdk", None, "do_populate_sdk", d)
51 bb.build.addtask("testsdkext", None, "do_populate_sdk_ext", d)
52}
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
deleted file mode 100644
index 380247faf5..0000000000
--- a/meta/classes/texinfo.bbclass
+++ /dev/null
@@ -1,24 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is inherited by recipes whose upstream packages invoke the
8# texinfo utilities at build-time. Native and cross recipes are made to use the
9# dummy scripts provided by texinfo-dummy-native, for improved performance.
10# Target architecture recipes use the genuine Texinfo utilities. By default,
11# they use the Texinfo utilities on the host system. If you want to use the
12# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
13# makeinfo from SANITY_REQUIRED_UTILITIES.
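#
# A local.conf sketch for that (assumes the defaults described above):
#
#   ASSUME_PROVIDED:remove = "texinfo-native"
#   SANITY_REQUIRED_UTILITIES:remove = "makeinfo"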
14
15TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
16TEXDEP:class-native = "texinfo-dummy-native"
17TEXDEP:class-cross = "texinfo-dummy-native"
18TEXDEP:class-crosssdk = "texinfo-dummy-native"
19TEXDEP:class-cross-canadian = "texinfo-dummy-native"
20DEPENDS:append = " ${TEXDEP}"
21
22# libtool-cross doesn't inherit cross
23TEXDEP:pn-libtool-cross = "texinfo-dummy-native"
24
diff --git a/meta/classes/toolchain-scripts-base.bbclass b/meta/classes/toolchain-scripts-base.bbclass
deleted file mode 100644
index d24a986e02..0000000000
--- a/meta/classes/toolchain-scripts-base.bbclass
+++ /dev/null
@@ -1,17 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This function creates a version information file
8toolchain_create_sdk_version () {
9 local versionfile=$1
10 rm -f $versionfile
11 touch $versionfile
12 echo 'Distro: ${DISTRO}' >> $versionfile
13 echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
14 echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
15 echo 'Timestamp: ${DATETIME}' >> $versionfile
16}
17toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
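#
# Illustrative contents of the generated file (values are examples only):
#
#   Distro: poky
#   Distro Version: 4.1
#   Metadata Revision: 0123abc
#   Timestamp: 20230101120000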
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
deleted file mode 100644
index 3cc823fe63..0000000000
--- a/meta/classes/toolchain-scripts.bbclass
+++ /dev/null
@@ -1,236 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit toolchain-scripts-base siteinfo kernel-arch
8
9# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
10# doesn't always match our expectations... but we default to the stock value
11REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
12TARGET_CC_ARCH:append:libc-musl = " -mmusl"
13
14# default debug prefix map isn't valid in the SDK
15DEBUG_PREFIX_MAP = ""
16
17EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
18
19# This function creates an environment-setup-script for use in a deployable SDK
20toolchain_create_sdk_env_script () {
21 # Create environment setup script. Remember that $SDKTARGETSYSROOT should
22 # only be expanded on the target at runtime.
23 base_sbindir=${10:-${base_sbindir_nativesdk}}
24 base_bindir=${9:-${base_bindir_nativesdk}}
25 sbindir=${8:-${sbindir_nativesdk}}
26 sdkpathnative=${7:-${SDKPATHNATIVE}}
27 prefix=${6:-${prefix_nativesdk}}
28 bindir=${5:-${bindir_nativesdk}}
29 libdir=${4:-${libdir}}
30 sysroot=${3:-${SDKTARGETSYSROOT}}
31 multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
32 script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
33 rm -f $script
34 touch $script
35
36 echo '# Check for LD_LIBRARY_PATH being set, which can break SDK and generally is a bad practice' >> $script
37 echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
38 echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
39	echo '# Only disable this check if you absolutely know what you are doing!' >> $script
40 echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
41 echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
42 echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
43 echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
44 echo ' echo "For more references see:"' >> $script
45 echo ' echo " http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80"' >> $script
46 echo ' echo " http://xahlee.info/UnixResource_dir/_/ldpath.html"' >> $script
47 echo ' return 1' >> $script
48 echo 'fi' >> $script
49
50 echo "${EXPORT_SDK_PS1}" >> $script
51 echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
52 EXTRAPATH=""
53 for i in ${CANADIANEXTRAOS}; do
54 EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
55 done
56 echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
57 echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
58 echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
59 echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
60 echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
61 echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
62 echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
63 echo 'export OECORE_BASELIB="${baselib}"' >> $script
64 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
65 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
66
67 echo 'unset command_not_found_handle' >> $script
68
69 toolchain_shared_env_script
70}
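# A usage sketch (install path illustrative): source the generated script and
# the exported variables drive cross-compilation:
#
#   $ . /opt/poky/4.1/environment-setup-cortexa57-poky-linux
#   $ $CC hello.c -o hello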
71
72# This function creates an environment-setup-script in B which enables
73# an OE-core IDE to integrate with the build tree
74# Caller must ensure CONFIG_SITE is setup
75toolchain_create_tree_env_script () {
76 script=${B}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
77 rm -f $script
78 touch $script
79 echo 'standalone_sysroot_target="${STAGING_DIR}/${MACHINE}"' >> $script
80 echo 'standalone_sysroot_native="${STAGING_DIR}/${BUILD_ARCH}"' >> $script
81 echo 'orig=`pwd`; cd ${COREBASE}; . ./oe-init-build-env ${TOPDIR}; cd $orig' >> $script
82 echo 'export PATH=$standalone_sysroot_native/${bindir_native}:$standalone_sysroot_native/${bindir_native}/${TARGET_SYS}:$PATH' >> $script
83 echo 'export PKG_CONFIG_SYSROOT_DIR=$standalone_sysroot_target' >> $script
84 echo 'export PKG_CONFIG_PATH=$standalone_sysroot_target'"$libdir"'/pkgconfig:$standalone_sysroot_target'"$prefix"'/share/pkgconfig' >> $script
85 echo 'export CONFIG_SITE="${CONFIG_SITE}"' >> $script
86 echo 'export SDKTARGETSYSROOT=$standalone_sysroot_target' >> $script
87 echo 'export OECORE_NATIVE_SYSROOT=$standalone_sysroot_native' >> $script
88 echo 'export OECORE_TARGET_SYSROOT=$standalone_sysroot_target' >> $script
89 echo 'export OECORE_ACLOCAL_OPTS="-I $standalone_sysroot_native/usr/share/aclocal"' >> $script
90 echo 'export OECORE_BASELIB="${baselib}"' >> $script
91 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
92 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
93
94 toolchain_shared_env_script
95
96 cat >> $script <<EOF
97
98if [ -d "\$OECORE_NATIVE_SYSROOT/${datadir}/post-relocate-setup.d/" ]; then
99 for s in \$OECORE_NATIVE_SYSROOT/${datadir}/post-relocate-setup.d/*; do
100 if [ ! -x \$s ]; then
101 continue
102 fi
103 \$s "\$1"
104 status=\$?
105 if [ \$status != 0 ]; then
106 echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
107 exit \$status
108 fi
109 done
110fi
111EOF
112}
113
114toolchain_shared_env_script () {
115 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
116 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
117 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
118 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
119 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
120 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
121 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
122 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
123 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
124 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
125 echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
126 echo 'export AR=${TARGET_PREFIX}ar' >> $script
127 echo 'export NM=${TARGET_PREFIX}nm' >> $script
128 echo 'export M4=m4' >> $script
129 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
130 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script
131 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
132 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
133 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
134 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
135 echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script
136 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
137 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
138 echo 'export ARCH=${ARCH}' >> $script
139 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
140 echo 'export OECORE_TUNE_CCARGS="${TUNE_CCARGS}"' >> $script
141
142 cat >> $script <<EOF
143
144# Append environment subscripts
145if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
146 for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
147 . \$envfile
148 done
149fi
150if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
151 for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
152 . \$envfile
153 done
154fi
155EOF
156}
157
158toolchain_create_post_relocate_script() {
159 relocate_script=$1
160 env_dir=$2
161 rm -f $relocate_script
162 touch $relocate_script
163
164 cat >> $relocate_script <<EOF
165if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then
166 # Source top-level SDK env scripts in case they are needed for the relocate
167 # scripts.
168 for env_setup_script in ${env_dir}/environment-setup-*; do
169 . \$env_setup_script
170 status=\$?
171 if [ \$status != 0 ]; then
172 echo "\$0: Failed to source \$env_setup_script with status \$status"
173 exit \$status
174 fi
175
176 for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*; do
177 if [ ! -x \$s ]; then
178 continue
179 fi
180 \$s "\$1"
181 status=\$?
182 if [ \$status != 0 ]; then
183 echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
184 exit \$status
185 fi
186 done
187 done
188 rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d"
189fi
190EOF
191}
192
193# We get the cached site config at runtime
194TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}"
195TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
196TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
197DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
198
199# This function creates a site config file
200toolchain_create_sdk_siteconfig () {
201 local siteconfig=$1
202
203 rm -f $siteconfig
204 touch $siteconfig
205
206 for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
207 cat $sitefile >> $siteconfig
208 done
209
210 #get cached site config
211 for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
212 # Resolve virtual/* names to the real recipe name using sysroot-providers info
213 case $sitefile in virtual/*)
214 sitefile=`echo $sitefile | tr / _`
215 sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile`
216 esac
217
218 if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
219 cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
220 fi
221 done
222}
223# The immediate expansion above can result in unwanted path dependencies here
224toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
225
226python __anonymous () {
227 import oe.classextend
228 deps = ""
229 for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE') or "").split():
230 deps += " %s:do_populate_sysroot" % dep
231 for variant in (d.getVar('MULTILIB_VARIANTS') or "").split():
232 clsextend = oe.classextend.ClassExtender(variant, d)
233 newdep = clsextend.extend_name(dep)
234 deps += " %s:do_populate_sysroot" % newdep
235 d.appendVarFlag('do_configure', 'depends', deps)
236}
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
deleted file mode 100644
index 9889d026fa..0000000000
--- a/meta/classes/uboot-config.bbclass
+++ /dev/null
@@ -1,133 +0,0 @@
1# Handle U-Boot config for a machine
2#
3# The format to specify it, in the machine, is:
4#
5# UBOOT_CONFIG ??= <default>
6# UBOOT_CONFIG[foo] = "config,images,binary"
7#
8# or
9#
10# UBOOT_MACHINE = "config"
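#
# For example (values illustrative, not from a real machine):
#
# UBOOT_CONFIG ??= "sd"
# UBOOT_CONFIG[sd] = "mx6qsabresd_config,sdcard"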
11#
12# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
13#
14# SPDX-License-Identifier: MIT
15
16
17def removesuffix(s, suffix):
18 if suffix and s.endswith(suffix):
19 return s[:-len(suffix)]
20 return s
21
22# Some versions of u-boot use .bin and others use .img. By default use .bin
23# but enable individual recipes to change this value.
24UBOOT_SUFFIX ??= "bin"
25UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
26UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}"
27UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
28UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}"
29UBOOT_MAKE_TARGET ?= "all"
30
31# Output the generated ELF. Some platforms can use the ELF file and load it
32# directly (JTAG booting, QEMU); additionally, the ELF can be used for
33# debugging purposes.
34UBOOT_ELF ?= ""
35UBOOT_ELF_SUFFIX ?= "elf"
36UBOOT_ELF_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.${UBOOT_ELF_SUFFIX}"
37UBOOT_ELF_BINARY ?= "u-boot.${UBOOT_ELF_SUFFIX}"
38UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}"
39
40# Some versions of u-boot build an SPL (Secondary Program Loader) image that
41# should be packaged along with the u-boot binary as well as placed in the
42# deploy directory. Those versions can set the following variables to allow
43# packaging the SPL.
44SPL_SUFFIX ?= ""
45SPL_BINARY ?= ""
46SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}"
47SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}"
48SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}"
49SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}${SPL_DELIMITER}${SPL_SUFFIX}"
50SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}"
51
52# Additional environment variables or a script can be installed alongside
53# u-boot to be used automatically on boot. This file, typically 'uEnv.txt'
54# or 'boot.scr', should be packaged along with u-boot as well as placed in the
55# deploy directory. Machine configurations needing one of these files should
56# include it in the SRC_URI and set the UBOOT_ENV parameter.
57UBOOT_ENV_SUFFIX ?= "txt"
58UBOOT_ENV ?= ""
59UBOOT_ENV_SRC_SUFFIX ?= "cmd"
60UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}"
61UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
62UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
63UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
64
65# Default name of u-boot initial env, but enable individual recipes to change
66# this value.
67UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
68
69# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
70# to find EXTLINUX conf file.
71UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
72UBOOT_EXTLINUX_CONF_NAME ?= "extlinux.conf"
73UBOOT_EXTLINUX_SYMLINK ?= "${UBOOT_EXTLINUX_CONF_NAME}-${MACHINE}-${PR}"
74
75# Options for the device tree compiler passed to mkimage '-D' feature:
76UBOOT_MKIMAGE_DTCOPTS ??= ""
77SPL_MKIMAGE_DTCOPTS ??= ""
78
79# mkimage command
80UBOOT_MKIMAGE ?= "uboot-mkimage"
81UBOOT_MKIMAGE_SIGN ?= "${UBOOT_MKIMAGE}"
82
83# Arguments passed to mkimage for signing
84UBOOT_MKIMAGE_SIGN_ARGS ?= ""
85SPL_MKIMAGE_SIGN_ARGS ?= ""
86
87# Options to deploy the u-boot device tree
88UBOOT_DTB ?= ""
89UBOOT_DTB_BINARY ??= ""
90
91python () {
92 ubootmachine = d.getVar("UBOOT_MACHINE")
93 ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
94 ubootbinary = d.getVar('UBOOT_BINARY')
95 ubootbinaries = d.getVar('UBOOT_BINARIES')
96 # The "doc" varflag is special, we don't want to see it here
97 ubootconfigflags.pop('doc', None)
98 ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
99
100 if not ubootmachine and not ubootconfig:
101 PN = d.getVar("PN")
102 FILE = os.path.basename(d.getVar("FILE"))
103 bb.debug(1, "To build %s, see %s for instructions on \
104 setting up your machine config" % (PN, FILE))
105 raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
106
107 if ubootmachine and ubootconfig:
108 raise bb.parse.SkipRecipe("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
109
110 if ubootconfigflags and ubootbinaries:
111 raise bb.parse.SkipRecipe("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
112
113 if len(ubootconfig) > 0:
114 for config in ubootconfig:
115 for f, v in ubootconfigflags.items():
116 if config == f:
117 items = v.split(',')
118 if items[0] and len(items) > 3:
119 raise bb.parse.SkipRecipe('Only config,images,binary can be specified!')
120 d.appendVar('UBOOT_MACHINE', ' ' + items[0])
121 # IMAGE_FSTYPES appending
122 if len(items) > 1 and items[1]:
123 bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
124 d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
125 if len(items) > 2 and items[2]:
126 bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % items[2])
127 d.appendVar('UBOOT_BINARIES', ' ' + items[2])
128 else:
129 bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary)
130 d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary)
131 return
132 raise bb.parse.SkipRecipe("The selected UBOOT_CONFIG key %s has no match in %s." % (ubootconfig, ubootconfigflags.keys()))
133}
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
deleted file mode 100644
index 86a7d30ca0..0000000000
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ /dev/null
@@ -1,158 +0,0 @@
1# uboot-extlinux-config.bbclass
2#
3# This class allows the extlinux.conf generation for U-Boot use. The
4# U-Boot support for it is provided to allow OpenEmbedded-based products
5# to use the Generic Distribution Configuration specification.
6#
7# External variables:
8#
9# UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot
10# default console.
11# UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config.
12# UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments.
13# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
14# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
15# UBOOT_EXTLINUX_FDT - Device tree file.
16# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
17# concatenate and use as an initrd (optional).
18# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
19# UBOOT_EXTLINUX_ROOT - Root kernel cmdline.
20# UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made.
21# Measured in 1/10 of a second.
22# UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after
23# the timeout period
24#
25# If there's only one label, the system will boot automatically and the menu
26# won't be created. To use more than one label, e.g. linux and alternate, use
27# overrides to set the menu description, console and other variables.
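#
# Note that generation only runs when UBOOT_EXTLINUX = "1" is set (checked at
# the top of do_create_extlinux_config below); a machine config sketch, with
# an illustrative root device:
#
# UBOOT_EXTLINUX = "1"
# UBOOT_EXTLINUX_ROOT = "root=/dev/mmcblk2p2"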
28#
29# Ex:
30#
31# UBOOT_EXTLINUX_LABELS ??= "default fallback"
32#
33# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
34# UBOOT_EXTLINUX_TIMEOUT ??= "30"
35#
36# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
37# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
38#
39# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
40# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
41#
42# Results:
43#
44# menu title Select the boot mode
45# TIMEOUT 30
46# DEFAULT Linux Default
47# LABEL Linux Default
48# KERNEL ../zImage
49# FDTDIR ../
50# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
51# LABEL Linux Fallback
52# KERNEL ../zImage-fallback
53# FDTDIR ../
54# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
55#
56# Copyright (C) 2016, O.S. Systems Software LTDA. All Rights Reserved
57# SPDX-License-Identifier: MIT
58#
59# The kernel has an internal default console, which you can override with
60# a console=...some_tty...
61UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}"
62UBOOT_EXTLINUX_LABELS ??= "linux"
63UBOOT_EXTLINUX_FDT ??= ""
64UBOOT_EXTLINUX_FDTDIR ??= "../"
65UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
66UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
67UBOOT_EXTLINUX_MENU_DESCRIPTION:linux ??= "${DISTRO_NAME}"
68
69UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
70
71python do_create_extlinux_config() {
72 if d.getVar("UBOOT_EXTLINUX") != "1":
73 return
74
75 if not d.getVar('WORKDIR'):
76 bb.error("WORKDIR not defined, unable to package")
77
78 labels = d.getVar('UBOOT_EXTLINUX_LABELS')
79 if not labels:
80 bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do")
81
82 if not labels.strip():
83 bb.fatal("No labels, nothing to do")
84
85 cfile = d.getVar('UBOOT_EXTLINUX_CONFIG')
86 if not cfile:
87 bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG')
88
89 localdata = bb.data.createCopy(d)
90
91 try:
92 with open(cfile, 'w') as cfgfile:
93 cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
94
95 if len(labels.split()) > 1:
96 cfgfile.write('menu title Select the boot mode\n')
97
98 timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
99 if timeout:
100 cfgfile.write('TIMEOUT %s\n' % (timeout))
101
102 if len(labels.split()) > 1:
103 default = localdata.getVar('UBOOT_EXTLINUX_DEFAULT_LABEL')
104 if default:
105 cfgfile.write('DEFAULT %s\n' % (default))
106
107 # Need to deconflict the labels with existing overrides
108 label_overrides = labels.split()
109 default_overrides = localdata.getVar('OVERRIDES').split(':')
110            # We're keeping all the existing overrides that aren't used as a label;
111            # an override for each label is added back in while we're processing that label
112 keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides))
113
114 for label in labels.split():
115
116 localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label]))
117
118 extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
119
120 menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION')
121 if not menu_description:
122 menu_description = label
123
124 root = localdata.getVar('UBOOT_EXTLINUX_ROOT')
125 if not root:
126 bb.fatal('UBOOT_EXTLINUX_ROOT not defined')
127
128 kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE')
129 fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR')
130
131 fdt = localdata.getVar('UBOOT_EXTLINUX_FDT')
132
133 if fdt:
134 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n' %
135 (menu_description, kernel_image, fdt))
136 elif fdtdir:
137 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' %
138 (menu_description, kernel_image, fdtdir))
139 else:
140 cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
141
142 kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS')
143
144 initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD')
145 if initrd:
146 cfgfile.write('\tINITRD %s\n'% initrd)
147
148 kernel_args = root + " " + kernel_args
149 cfgfile.write('\tAPPEND %s %s\n' % (kernel_args, extlinux_console))
150
151 except OSError:
152 bb.fatal('Unable to open %s' % (cfile))
153}
154UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
155do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
156do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
157
158addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
deleted file mode 100644
index debbf23ec6..0000000000
--- a/meta/classes/uboot-sign.bbclass
+++ /dev/null
@@ -1,505 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This file is part of U-Boot verified boot support and is intended to be
8# inherited from u-boot recipe and from kernel-fitimage.bbclass.
9#
10# The signature procedure requires the user to generate an RSA key and
11# certificate in a directory and to define the following variable:
12#
13# UBOOT_SIGN_KEYDIR = "/keys/directory"
14# UBOOT_SIGN_KEYNAME = "dev" # keys name in keydir (eg. "dev.crt", "dev.key")
15# UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
16# UBOOT_SIGN_ENABLE = "1"
17#
18# As verified boot depends on fitImage generation, following is also required:
19#
20# KERNEL_CLASSES ?= " kernel-fitimage "
21# KERNEL_IMAGETYPE ?= "fitImage"
22#
23# The signature support is limited to the use of CONFIG_OF_SEPARATE in U-Boot.
24#
25# The task sequence is set as below, using DEPLOY_IMAGE_DIR as a common place
26# to handle the device tree blob:
27#
28# * u-boot:do_install:append
29# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
30# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
31#
32# * virtual/kernel:do_assemble_fitimage
33# Sign the image
34#
35# * u-boot:do_deploy[postfuncs]
36# Deploy files like UBOOT_DTB_IMAGE, UBOOT_DTB_SYMLINK and others.
37#
38# For more details on the signature process, please refer to the U-Boot documentation.
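#
# Signing the U-Boot fitImage itself (verified by the SPL) is controlled by
# similar variables (a sketch; the key directory and name are illustrative):
#
# UBOOT_FITIMAGE_ENABLE = "1"
# SPL_SIGN_ENABLE = "1"
# SPL_SIGN_KEYDIR = "/keys/directory"
# SPL_SIGN_KEYNAME = "spl-dev"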
39
40# We need some variables from u-boot-config
41inherit uboot-config
42
43# Enable use of a U-Boot fitImage
44UBOOT_FITIMAGE_ENABLE ?= "0"
45
46# Signature activation - these require their respective fitImages
47UBOOT_SIGN_ENABLE ?= "0"
48SPL_SIGN_ENABLE ?= "0"
49
50# Default value for deployment filenames.
51UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
52UBOOT_DTB_BINARY ?= "u-boot.dtb"
53UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
54UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.bin"
55UBOOT_NODTB_BINARY ?= "u-boot-nodtb.bin"
56UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.bin"
57UBOOT_ITS_IMAGE ?= "u-boot-its-${MACHINE}-${PV}-${PR}"
58UBOOT_ITS ?= "u-boot.its"
59UBOOT_ITS_SYMLINK ?= "u-boot-its-${MACHINE}"
60UBOOT_FITIMAGE_IMAGE ?= "u-boot-fitImage-${MACHINE}-${PV}-${PR}"
61UBOOT_FITIMAGE_BINARY ?= "u-boot-fitImage"
62UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}"
63SPL_DIR ?= "spl"
64SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb"
65SPL_DTB_BINARY ?= "u-boot-spl.dtb"
66SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb"
67SPL_NODTB_IMAGE ?= "u-boot-spl-nodtb-${MACHINE}-${PV}-${PR}.bin"
68SPL_NODTB_BINARY ?= "u-boot-spl-nodtb.bin"
69SPL_NODTB_SYMLINK ?= "u-boot-spl-nodtb-${MACHINE}.bin"
70
71# U-Boot fitImage description
72UBOOT_FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
73
74# Kernel / U-Boot fitImage Hash Algo
75FIT_HASH_ALG ?= "sha256"
76UBOOT_FIT_HASH_ALG ?= "sha256"
77
78# Kernel / U-Boot fitImage Signature Algo
79FIT_SIGN_ALG ?= "rsa2048"
80UBOOT_FIT_SIGN_ALG ?= "rsa2048"
81
82# Kernel / U-Boot fitImage Padding Algo
83FIT_PAD_ALG ?= "pkcs-1.5"
84
85# Generate keys for signing Kernel / U-Boot fitImage
86FIT_GENERATE_KEYS ?= "0"
87UBOOT_FIT_GENERATE_KEYS ?= "0"
88
89# Size of private keys in number of bits
90FIT_SIGN_NUMBITS ?= "2048"
91UBOOT_FIT_SIGN_NUMBITS ?= "2048"
92
93# args to openssl genrsa (Default is just the public exponent)
94FIT_KEY_GENRSA_ARGS ?= "-F4"
95UBOOT_FIT_KEY_GENRSA_ARGS ?= "-F4"
96
97# args to openssl req (Default is -batch for non interactive mode and
98# -new for new certificate)
99FIT_KEY_REQ_ARGS ?= "-batch -new"
100UBOOT_FIT_KEY_REQ_ARGS ?= "-batch -new"
101
102# Standard format for public key certificate
103FIT_KEY_SIGN_PKCS ?= "-x509"
104UBOOT_FIT_KEY_SIGN_PKCS ?= "-x509"
105
106# Functions in this bbclass can apply to either U-Boot or the kernel,
107# depending on the scenario
108UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
109KERNEL_PN = "${@d.getVar('PREFERRED_PROVIDER_virtual/kernel')}"
110
111# We need u-boot-tools-native if we're creating a U-Boot fitImage
112python() {
113 if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1':
114 depends = d.getVar("DEPENDS")
115 depends = "%s u-boot-tools-native dtc-native" % depends
116 d.setVar("DEPENDS", depends)
117}
118
119concat_dtb_helper() {
120 if [ -e "${UBOOT_DTB_BINARY}" ]; then
121 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
122 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
123 fi
124
125 if [ -f "${UBOOT_NODTB_BINARY}" ]; then
126 install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
127 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
128 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
129 fi
130
131 # If we're not using a signed u-boot fit, concatenate SPL w/o DTB & U-Boot DTB
132 # with public key (otherwise it will be deployed by the equivalent
133 # concat_spl_dtb_helper function - cf. kernel-fitimage.bbclass for more details)
134 if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then
135 deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
136 if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
137 [ -e "$deployed_uboot_dtb_binary" ]; then
138 oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
139 install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
140 elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
141 cd ${DEPLOYDIR}
142 cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
143
144 if [ -n "${UBOOT_CONFIG}" ]
145 then
146 i=0
147 j=0
148 for config in ${UBOOT_MACHINE}; do
149 i=$(expr $i + 1);
150 for type in ${UBOOT_CONFIG}; do
151 j=$(expr $j + 1);
152 if [ $j -eq $i ]
153 then
154 cp ${UBOOT_IMAGE} ${B}/${CONFIG_B_PATH}/u-boot-$type.${UBOOT_SUFFIX}
155 fi
156 done
157 done
158 fi
159 else
160 bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
161 fi
162 fi
163}
164
165concat_spl_dtb_helper() {
166
167	# We only deploy symlinks to the u-boot-spl.dtb, as the KERNEL_PN will
168 # be responsible for deploying the real file
169 if [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then
170 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK}
171 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY}
172 fi
173
174 # Concatenate the SPL nodtb binary and u-boot.dtb
175 deployed_spl_dtb_binary='${DEPLOY_DIR_IMAGE}/${SPL_DTB_IMAGE}'
176 if [ -e "${DEPLOYDIR}/${SPL_NODTB_IMAGE}" -a -e "$deployed_spl_dtb_binary" ] ; then
177 cd ${DEPLOYDIR}
178 cat ${SPL_NODTB_IMAGE} $deployed_spl_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${SPL_BINARY} > ${SPL_IMAGE}
179 else
180 bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available."
181 fi
182}
183
184
185concat_dtb() {
186 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
187 mkdir -p ${DEPLOYDIR}
188 if [ -n "${UBOOT_CONFIG}" ]; then
189 for config in ${UBOOT_MACHINE}; do
190 CONFIG_B_PATH="$config"
191 cd ${B}/$config
192 concat_dtb_helper
193 done
194 else
195 CONFIG_B_PATH=""
196 cd ${B}
197 concat_dtb_helper
198 fi
199 fi
200}
201
202concat_spl_dtb() {
203 if [ "${SPL_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${SPL_DTB_BINARY}" ]; then
204 mkdir -p ${DEPLOYDIR}
205 if [ -n "${UBOOT_CONFIG}" ]; then
206 for config in ${UBOOT_MACHINE}; do
207 CONFIG_B_PATH="$config"
208 cd ${B}/$config
209 concat_spl_dtb_helper
210 done
211 else
212 CONFIG_B_PATH=""
213 cd ${B}
214 concat_spl_dtb_helper
215 fi
216 fi
217}
218
219
220# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
221# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
222install_helper() {
223 if [ -f "${UBOOT_DTB_BINARY}" ]; then
224 # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
225 # need both of them.
226 install -Dm 0644 ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
227 ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
228 else
229 bbwarn "${UBOOT_DTB_BINARY} not found"
230 fi
231}
232
233# Install SPL dtb and u-boot nodtb to datadir,
234install_spl_helper() {
235 if [ -f "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then
236 install -Dm 0644 ${SPL_DIR}/${SPL_DTB_BINARY} ${D}${datadir}/${SPL_DTB_IMAGE}
237 ln -sf ${SPL_DTB_IMAGE} ${D}${datadir}/${SPL_DTB_BINARY}
238 else
239 bbwarn "${SPL_DTB_BINARY} not found"
240 fi
241 if [ -f "${UBOOT_NODTB_BINARY}" ] ; then
242 install -Dm 0644 ${UBOOT_NODTB_BINARY} ${D}${datadir}/${UBOOT_NODTB_IMAGE}
243 ln -sf ${UBOOT_NODTB_IMAGE} ${D}${datadir}/${UBOOT_NODTB_BINARY}
244 else
245 bbwarn "${UBOOT_NODTB_BINARY} not found"
246 fi
247
248 # We need to install a 'stub' u-boot-fitimage + its to datadir,
249 # so that the KERNEL_PN can use the correct filename when
250 # assembling and deploying them
251 touch ${D}/${datadir}/${UBOOT_FITIMAGE_IMAGE}
252 touch ${D}/${datadir}/${UBOOT_ITS_IMAGE}
253}
254
255do_install:append() {
256 if [ "${PN}" = "${UBOOT_PN}" ]; then
257 if [ -n "${UBOOT_CONFIG}" ]; then
258 for config in ${UBOOT_MACHINE}; do
259 cd ${B}/$config
260 if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
261 [ -n "${UBOOT_DTB_BINARY}" ]; then
262 install_helper
263 fi
264 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
265 install_spl_helper
266 fi
267 done
268 else
269 cd ${B}
270 if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
271 [ -n "${UBOOT_DTB_BINARY}" ]; then
272 install_helper
273 fi
274 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
275 install_spl_helper
276 fi
277 fi
278 fi
279}
280
281do_uboot_generate_rsa_keys() {
282 if [ "${SPL_SIGN_ENABLE}" = "0" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
283		bbwarn "UBOOT_FIT_GENERATE_KEYS is set to 1 even though SPL_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
284 fi
285
286 if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
287
288 # Generate keys only if they don't already exist
289 if [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key ] || \
290 [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt ]; then
291
292 # make directory if it does not already exist
293 mkdir -p "${SPL_SIGN_KEYDIR}"
294
295 echo "Generating RSA private key for signing U-Boot fitImage"
296 openssl genrsa ${UBOOT_FIT_KEY_GENRSA_ARGS} -out \
297 "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
298 "${UBOOT_FIT_SIGN_NUMBITS}"
299
300 echo "Generating certificate for signing U-Boot fitImage"
301 openssl req ${FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
302 -key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
303 -out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt
304 fi
305 fi
306
307}
308
309addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compile
310
311# Create an ITS file for the U-Boot FIT, for use when
312# we want to sign it so that the SPL can verify it
313uboot_fitimage_assemble() {
314 uboot_its="$1"
315 uboot_nodtb_bin="$2"
316 uboot_dtb="$3"
317 uboot_bin="$4"
318 spl_dtb="$5"
319 uboot_csum="${UBOOT_FIT_HASH_ALG}"
320 uboot_sign_algo="${UBOOT_FIT_SIGN_ALG}"
321 uboot_sign_keyname="${SPL_SIGN_KEYNAME}"
322
323 rm -f $uboot_its $uboot_bin
324
325 # First we create the ITS script
326 cat << EOF >> $uboot_its
327/dts-v1/;
328
329/ {
330 description = "${UBOOT_FIT_DESC}";
331 #address-cells = <1>;
332
333 images {
334 uboot {
335 description = "U-Boot image";
336 data = /incbin/("$uboot_nodtb_bin");
337 type = "standalone";
338 os = "u-boot";
339 arch = "${UBOOT_ARCH}";
340 compression = "none";
341 load = <${UBOOT_LOADADDRESS}>;
342 entry = <${UBOOT_ENTRYPOINT}>;
343EOF
344
345 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
346 cat << EOF >> $uboot_its
347 signature {
348 algo = "$uboot_csum,$uboot_sign_algo";
349 key-name-hint = "$uboot_sign_keyname";
350 };
351EOF
352 fi
353
354 cat << EOF >> $uboot_its
355 };
356 fdt {
357 description = "U-Boot FDT";
358 data = /incbin/("$uboot_dtb");
359 type = "flat_dt";
360 arch = "${UBOOT_ARCH}";
361 compression = "none";
362EOF
363
364 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
365 cat << EOF >> $uboot_its
366 signature {
367 algo = "$uboot_csum,$uboot_sign_algo";
368 key-name-hint = "$uboot_sign_keyname";
369 };
370EOF
371 fi
372
373 cat << EOF >> $uboot_its
374 };
375 };
376
377 configurations {
378 default = "conf";
379 conf {
380 description = "Boot with signed U-Boot FIT";
381 loadables = "uboot";
382 fdt = "fdt";
383 };
384 };
385};
386EOF
387
388 #
389 # Assemble the U-boot FIT image
390 #
391 ${UBOOT_MKIMAGE} \
392 ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
393 -f $uboot_its \
394 $uboot_bin
395
396 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
397 #
398 # Sign the U-boot FIT image and add public key to SPL dtb
399 #
400 ${UBOOT_MKIMAGE_SIGN} \
401 ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
402 -F -k "${SPL_SIGN_KEYDIR}" \
403 -K "$spl_dtb" \
404 -r $uboot_bin \
405 ${SPL_MKIMAGE_SIGN_ARGS}
406 fi
407
408}
409
410do_uboot_assemble_fitimage() {
411 # This function runs in KERNEL_PN context. The reason for that is that we need to
412 # support the scenario where UBOOT_SIGN_ENABLE is placing the Kernel fitImage's
413 # pubkey in the u-boot.dtb file, so that we can use it when building the U-Boot
414 # fitImage itself.
415 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
416 [ -n "${SPL_DTB_BINARY}" -a "${PN}" = "${KERNEL_PN}" ] ; then
417 if [ "${UBOOT_SIGN_ENABLE}" != "1" ]; then
418 # If we're not signing the Kernel fitImage, that means
419 # we need to copy the u-boot.dtb from staging ourselves
420 cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
421 fi
422 # As we are in the kernel context, we need to copy u-boot-spl.dtb from staging first.
423		# Unfortunately, we need to glob on top of ${SPL_DTB_BINARY} since _IMAGE and _SYMLINK
424 # will contain U-boot's PV
425 # Similarly, we need to get the filename for the 'stub' u-boot-fitimage + its in
426 # staging so that we can use it for creating the image with the correct filename
427 # in the KERNEL_PN context.
428 # As for the u-boot.dtb (with fitimage's pubkey), it should come from the dependent
429 # do_assemble_fitimage task
430 cp -P ${STAGING_DATADIR}/u-boot-spl*.dtb ${B}
431 cp -P ${STAGING_DATADIR}/u-boot-nodtb*.bin ${B}
432 rm -rf ${B}/u-boot-fitImage-* ${B}/u-boot-its-*
433 kernel_uboot_fitimage_name=`basename ${STAGING_DATADIR}/u-boot-fitImage-*`
434 kernel_uboot_its_name=`basename ${STAGING_DATADIR}/u-boot-its-*`
435 cd ${B}
436 uboot_fitimage_assemble $kernel_uboot_its_name ${UBOOT_NODTB_BINARY} \
437 ${UBOOT_DTB_BINARY} $kernel_uboot_fitimage_name \
438 ${SPL_DTB_BINARY}
439 fi
440}
441
442addtask uboot_assemble_fitimage before do_deploy after do_compile
443
444do_deploy:prepend:pn-${UBOOT_PN}() {
445 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
446 concat_dtb
447 fi
448
449 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
450 # Deploy the u-boot-nodtb binary and symlinks...
451 if [ -f "${SPL_DIR}/${SPL_NODTB_BINARY}" ] ; then
452 echo "Copying u-boot-nodtb binary..."
453 install -m 0644 ${SPL_DIR}/${SPL_NODTB_BINARY} ${DEPLOYDIR}/${SPL_NODTB_IMAGE}
454 ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK}
455 ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_BINARY}
456 fi
457
458
459 # We only deploy the symlinks to the uboot-fitImage and uboot-its
460 # images, as the KERNEL_PN will take care of deploying the real file
461 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_BINARY}
462 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK}
463 ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS}
464 ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS_SYMLINK}
465 fi
466
467 if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then
468 concat_spl_dtb
469 fi
470
471
472}
473
474do_deploy:append:pn-${UBOOT_PN}() {
475	# If we're creating a u-boot fitImage, point the u-boot.bin
476	# symlink at it, since it might get used by image recipes
477 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
478 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_BINARY}
479 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_SYMLINK}
480 fi
481}
482
483python () {
484 if ( (d.getVar('UBOOT_SIGN_ENABLE') == '1'
485 or d.getVar('UBOOT_FITIMAGE_ENABLE') == '1')
486 and d.getVar('PN') == d.getVar('UBOOT_PN')
487 and d.getVar('UBOOT_DTB_BINARY')):
488
489        # Make "bitbake u-boot -cdeploy" deploy the signed u-boot.dtb
490 # and/or the U-Boot fitImage
491 d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % d.getVar('KERNEL_PN'))
492
493 if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' and d.getVar('PN') == d.getVar('KERNEL_PN'):
494 # As the U-Boot fitImage is created by the KERNEL_PN, we need
495 # to make sure that the u-boot-spl.dtb and u-boot-spl-nodtb.bin
496        # files are in the staging dir for its use
497 d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % d.getVar('UBOOT_PN'))
498
499 # If the Kernel fitImage is being signed, we need to
500 # create the U-Boot fitImage after it
501 if d.getVar('UBOOT_SIGN_ENABLE') == '1':
502 d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage' % d.getVar('KERNEL_PN'))
503 d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage_initramfs' % d.getVar('KERNEL_PN'))
504
505}
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
deleted file mode 100644
index 4b7fb36449..0000000000
--- a/meta/classes/uninative.bbclass
+++ /dev/null
@@ -1,177 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}"
8UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
9
10UNINATIVE_URL ?= "unset"
11UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
12# Example checksums
13#UNINATIVE_CHECKSUM[aarch64] = "dead"
14#UNINATIVE_CHECKSUM[i686] = "dead"
15#UNINATIVE_CHECKSUM[x86_64] = "dead"
16UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
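#
# A distro configuration sketch (version and checksum illustrative; Poky
# ships real values in conf/distro/include/yocto-uninative.inc):
#
# UNINATIVE_MAXGLIBCVERSION = "2.36"
# UNINATIVE_URL = "http://downloads.yoctoproject.org/releases/uninative/3.7/"
# UNINATIVE_CHECKSUM[x86_64] = "<sha256 of the x86_64 tarball>"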
17
18# Enabling uninative will change the following variables, so they need to go on the parsing-ignored variables list to prevent repeated recipe parsing
19BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
20
21addhandler uninative_event_fetchloader
22uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
23
24addhandler uninative_event_enable
25uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
26
27python uninative_event_fetchloader() {
28 """
29 This event fires on the parent and will try to fetch the tarball if the
30 loader isn't already present.
31 """
32
33 chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
34 if not chksum:
35 bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))
36
37 loader = d.getVar("UNINATIVE_LOADER")
38 loaderchksum = loader + ".chksum"
39 if os.path.exists(loader) and os.path.exists(loaderchksum):
40 with open(loaderchksum, "r") as f:
41 readchksum = f.read().strip()
42 if readchksum == chksum:
43 return
44
45 import subprocess
46 try:
47 # Save and restore cwd as Fetch.download() does a chdir()
48 olddir = os.getcwd()
49
50 tarball = d.getVar("UNINATIVE_TARBALL")
51 tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
52 tarballpath = os.path.join(tarballdir, tarball)
53
54 if not os.path.exists(tarballpath + ".done"):
55 bb.utils.mkdirhier(tarballdir)
56 if d.getVar("UNINATIVE_URL") == "unset":
57 bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
58
59 localdata = bb.data.createCopy(d)
60 localdata.setVar('FILESPATH', "")
61 localdata.setVar('DL_DIR', tarballdir)
62 # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
63 # and we can't easily put 'chksum' into the url path from a url parameter with
64 # the current fetcher url handling
65 premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
66 for line in premirrors:
67 try:
68 (find, replace) = line
69 except ValueError:
70 continue
71 if find.startswith("http"):
72 localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
73
74 srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
75 bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
76
77 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
78 fetcher.download()
79 localpath = fetcher.localpath(srcuri)
80 if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
81 # Follow the symlink behavior from the bitbake fetch2.
82 # This will cover the case where an existing symlink is broken
83 # as well as if there are two processes trying to create it
84 # at the same time.
85 if os.path.islink(tarballpath):
86 # Broken symbolic link
87 os.unlink(tarballpath)
88
89 # Deal with two processes trying to make symlink at once
90 try:
91 os.symlink(localpath, tarballpath)
92 except FileExistsError:
93 pass
94
95        # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract the last field from the first line
96 glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
97 if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
98 raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
99
100 cmd = d.expand("\
101mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
102cd ${UNINATIVE_STAGING_DIR}-uninative; \
103tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
104${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
105 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
106 ${UNINATIVE_LOADER} \
107 ${UNINATIVE_LOADER} \
108 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
109 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
110 subprocess.check_output(cmd, shell=True)
111
112 with open(loaderchksum, "w") as f:
113 f.write(chksum)
114
115 enable_uninative(d)
116
117 except RuntimeError as e:
118 bb.warn(str(e))
119 except bb.fetch2.BBFetchException as exc:
120 bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
121 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
122 except subprocess.CalledProcessError as exc:
123 bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
124 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
125 finally:
126 os.chdir(olddir)
127}
128
129python uninative_event_enable() {
130 """
131 This event handler is called in the workers and is responsible for setting
132 up uninative if a loader is found.
133 """
134 enable_uninative(d)
135}
136
137def enable_uninative(d):
138 loader = d.getVar("UNINATIVE_LOADER")
139 if os.path.exists(loader):
140 bb.debug(2, "Enabling uninative")
141 d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
142 d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
143 d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
144 d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
145 d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
146 d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
147 d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
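    # e.g. NATIVELSBSTRING becomes "universal" plus the host gcc version (as
    # reported by oe.utils.host_gcc_version), letting native sstate be shared
    # across host distributions.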
148
149python uninative_changeinterp () {
150 import subprocess
151 import stat
152 import oe.qa
153
154 if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
155 return
156
157 sstateinst = d.getVar('SSTATE_INSTDIR')
158 for walkroot, dirs, files in os.walk(sstateinst):
159 for file in files:
160 if file.endswith(".so") or ".so." in file:
161 continue
162 f = os.path.join(walkroot, file)
163 if os.path.islink(f):
164 continue
165 s = os.stat(f)
166 if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
167 continue
168 elf = oe.qa.ELFFile(f)
169 try:
170 elf.open()
171 except oe.qa.NotELFFileError:
172 continue
173 if not elf.isDynamic():
174 continue
175
176 subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
177}
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
deleted file mode 100644
index 970d9bcd45..0000000000
--- a/meta/classes/update-alternatives.bbclass
+++ /dev/null
@@ -1,333 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is used to help the alternatives system, which is useful when
8# multiple sources provide the same command. You can use the update-alternatives
9# command directly in your recipe, but in most cases this class simplifies
10# that job.
11#
12# To use this class a number of variables should be defined:
13#
14# List all of the alternatives needed by a package:
15# ALTERNATIVE:<pkg> = "name1 name2 name3 ..."
16#
17# i.e. ALTERNATIVE:busybox = "sh sed test bracket"
18#
19# The pathname of the link
20# ALTERNATIVE_LINK_NAME[name] = "target"
21#
22# This is the name of the binary once it's been installed onto the runtime.
23# This name is global to all split packages in this recipe, and should match
24# other recipes with the same functionality.
25# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
26#
27# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
28#
29# The default link to create for all targets
30# ALTERNATIVE_TARGET = "target"
31#
32# This is useful in a multicall binary case
33# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
34#
35# A non-default link to create for a target
36# ALTERNATIVE_TARGET[name] = "target"
37#
38# This is the name of the binary as it has been installed by do_install
39# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
40#
41# A package specific link for a target
42# ALTERNATIVE_TARGET_<pkg>[name] = "target"
43#
44# This is useful when a recipe provides multiple alternatives for the
45# same item.
46#
47# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
48# from ALTERNATIVE_LINK_NAME.
49#
50# NOTE: If ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
51# ALTERNATIVE_TARGET will have '.${BPN}' appended to it. If the file
52# referenced has not been renamed, it will also be renamed. (This avoids
53# the need to rename alternative files in the do_install step, but still
54# supports it if necessary for some reason.)
55#
56# The default priority for any alternatives
57# ALTERNATIVE_PRIORITY = "priority"
58#
59# e.g. the default is ALTERNATIVE_PRIORITY = "10"
60#
61# The non-default priority for a specific target
62# ALTERNATIVE_PRIORITY[name] = "priority"
63#
64# The package priority for a specific target
65# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
66
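#
# Putting it together, a recipe fragment might look like the following
# (an illustrative sketch loosely modelled on busybox; the names and the
# priority are assumptions, not taken from a real recipe):
#
#   ALTERNATIVE:${PN} = "sh"
#   ALTERNATIVE_LINK_NAME[sh] = "${base_bindir}/sh"
#   ALTERNATIVE_TARGET[sh] = "${base_bindir}/busybox"
#   ALTERNATIVE_PRIORITY = "50"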
67ALTERNATIVE_PRIORITY = "10"
68
69# We need special processing for vardeps because it cannot work on
70# modified flag values. So we aggregate the flags into a new variable
71# and include that variable in the set.
72UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
73
74PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
75
76def gen_updatealternativesvardeps(d):
77 pkgs = (d.getVar("PACKAGES") or "").split()
78 vars = (d.getVar("UPDALTVARS") or "").split()
79
80 # First compute them for non_pkg versions
81 for v in vars:
82 for flag in sorted((d.getVarFlags(v) or {}).keys()):
83 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
84 continue
85 d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
86
87 for p in pkgs:
88 for v in vars:
89 for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
90 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
91 continue
92 d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
93
94def ua_extend_depends(d):
95 if 'virtual/update-alternatives' not in d.getVar('PROVIDES'):
96 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
97
98def update_alternatives_enabled(d):
99 # Update Alternatives only works on target packages...
100 if bb.data.inherits_class('native', d) or \
101 bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
102 bb.data.inherits_class('cross-canadian', d):
103 return False
104
105 # Disable when targeting mingw32 (no target support)
106 if d.getVar("TARGET_OS") == "mingw32":
107 return False
108
109 return True
110
111python __anonymous() {
112 if not update_alternatives_enabled(d):
113 return
114
115 # compute special vardeps
116 gen_updatealternativesvardeps(d)
117
118 # extend the depends to include virtual/update-alternatives
119 ua_extend_depends(d)
120}
121
122def gen_updatealternativesvars(d):
123 ret = []
124 pkgs = (d.getVar("PACKAGES") or "").split()
125 vars = (d.getVar("UPDALTVARS") or "").split()
126
127 for v in vars:
128 ret.append(v + "_VARDEPS")
129
130 for p in pkgs:
131 for v in vars:
132 ret.append(v + ":" + p)
133 ret.append(v + "_VARDEPS_" + p)
134 return " ".join(ret)
135
136# Now the new stuff: we use a custom function to generate the right values
137populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
138
139# We need to do the rename after the image creation step, but before
140# the split and strip steps. PACKAGE_PREPROCESS_FUNCS is the right
141# place for that.
142PACKAGE_PREPROCESS_FUNCS += "apply_update_alternative_renames"
143python apply_update_alternative_renames () {
144 if not update_alternatives_enabled(d):
145 return
146
147 import re
148
149 def update_files(alt_target, alt_target_rename, pkg, d):
150 f = d.getVar('FILES:' + pkg)
151 if f:
152 f = re.sub(r'(^|\s)%s(\s|$)' % re.escape(alt_target), r'\1%s\2' % alt_target_rename, f)
153 d.setVar('FILES:' + pkg, f)
154
155 # Check for deprecated usage...
156 pn = d.getVar('BPN')
157 if d.getVar('ALTERNATIVE_LINKS') != None:
158 bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
159
160 # Do actual update alternatives processing
161 pkgdest = d.getVar('PKGD')
162 for pkg in (d.getVar('PACKAGES') or "").split():
163 # If the src == dest, we know we need to rename the dest by appending ${BPN}
164 link_rename = []
165 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
166 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
167 if not alt_link:
168 alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
169 d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
170 if alt_link.startswith(os.path.join(d.getVar('sysconfdir'), 'init.d')):
171 # Managing init scripts does not work (bug #10433), foremost
172 # because of a race with update-rc.d
173 bb.fatal("Using update-alternatives for managing SysV init scripts is not supported")
174
175 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
176 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
177 # Sometimes alt_target is specified as relative to the link name.
178 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
179
180 # If the link and target are the same name, we need to rename the target.
181 if alt_link == alt_target:
182 src = '%s/%s' % (pkgdest, alt_target)
183 alt_target_rename = '%s.%s' % (alt_target, pn)
184 dest = '%s/%s' % (pkgdest, alt_target_rename)
185 if os.path.lexists(dest):
186 bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
187 elif os.path.lexists(src):
188 if os.path.islink(src):
189 # Delay rename of links
190 link_rename.append((alt_target, alt_target_rename))
191 else:
192 bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
193 bb.utils.rename(src, dest)
194 update_files(alt_target, alt_target_rename, pkg, d)
195 else:
196 bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
197 continue
198 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
199
200 # Process delayed link names
201 # Do these after other renames so we can correct broken links
202 for (alt_target, alt_target_rename) in link_rename:
203 src = '%s/%s' % (pkgdest, alt_target)
204 dest = '%s/%s' % (pkgdest, alt_target_rename)
205 link_target = oe.path.realpath(src, pkgdest, True)
206
207 if os.path.lexists(link_target):
208 # Ok, the link_target exists, we can rename
209 bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
210 bb.utils.rename(src, dest)
211 else:
212 # Try to resolve the broken link to link.${BPN}
213 link_maybe = '%s.%s' % (os.readlink(src), pn)
214 if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
215 # OK, the renamed link target exists; create a new link and remove the original
216 bb.note('%s: Creating new link %s -> %s' % (pn, alt_target_rename, link_maybe))
217 os.symlink(link_maybe, dest)
218 os.unlink(src)
219 else:
220 bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
221 continue
222 update_files(alt_target, alt_target_rename, pkg, d)
223}
224
225def update_alternatives_alt_targets(d, pkg):
226 """
227 Returns the update-alternatives metadata for a package.
228
229 The returned format is a list of tuples where each tuple contains:
230 alt_name: The binary name
231 alt_link: The path for the binary (Shared by different packages)
232 alt_target: The path for the renamed binary (Unique per package)
233 alt_priority: The priority of the alt_target
234
235 All the alt_targets will be installed into the sysroot. The alt_link is
236 a symlink pointing to the alt_target with the highest priority.
237 """
238
239 pn = d.getVar('BPN')
240 pkgdest = d.getVar('PKGD')
241 updates = list()
242 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
243 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
244 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
245 d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
246 d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
247 d.getVar('ALTERNATIVE_TARGET') or \
248 alt_link
249 alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \
250 d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \
251 d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \
252 d.getVar('ALTERNATIVE_PRIORITY')
253
254 # This shouldn't trigger, as it should have been resolved earlier!
255 if alt_link == alt_target:
256 bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
257 alt_target = '%s.%s' % (alt_target, pn)
258
259 if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
260 bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
261 continue
262
263 alt_target = os.path.normpath(alt_target)
264 updates.append( (alt_name, alt_link, alt_target, alt_priority) )
265
266 return updates
267
268PACKAGESPLITFUNCS:prepend = "populate_packages_updatealternatives "
269
270python populate_packages_updatealternatives () {
271 if not update_alternatives_enabled(d):
272 return
273
274 # Do actual update alternatives processing
275 for pkg in (d.getVar('PACKAGES') or "").split():
276 # Create post install/removal scripts
277 alt_setup_links = ""
278 alt_remove_links = ""
279 updates = update_alternatives_alt_targets(d, pkg)
280 for alt_name, alt_link, alt_target, alt_priority in updates:
281 alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
282 alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
283
284 if alt_setup_links:
285 # RDEPENDS setup
286 provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
287 if provider:
288 #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
289 d.appendVar('RDEPENDS:%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
290
291 bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
292 bb.note('%s' % alt_setup_links)
293 postinst = d.getVar('pkg_postinst:%s' % pkg)
294 if postinst:
295 postinst = alt_setup_links + postinst
296 else:
297 postinst = '#!/bin/sh\n' + alt_setup_links
298 d.setVar('pkg_postinst:%s' % pkg, postinst)
299
300 bb.note('%s' % alt_remove_links)
301 prerm = d.getVar('pkg_prerm:%s' % pkg) or '#!/bin/sh\n'
302 prerm += alt_remove_links
303 d.setVar('pkg_prerm:%s' % pkg, prerm)
304}
305
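# For illustration, with the hypothetical busybox-style values sketched in the
# header comment, the generated pkg_postinst would contain a call along the
# lines of (an approximation, not captured from a real build):
#
#   update-alternatives --install /bin/sh sh /bin/busybox 50
#
# and the matching pkg_prerm would run:
#
#   update-alternatives --remove sh /bin/busybox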
306python package_do_filedeps:append () {
307 if update_alternatives_enabled(d):
308 apply_update_alternative_provides(d)
309}
310
311def apply_update_alternative_provides(d):
312 pn = d.getVar('BPN')
313 pkgdest = d.getVar('PKGDEST')
314
315 for pkg in d.getVar('PACKAGES').split():
316 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
317 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
318 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
319 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
320
321 if alt_link == alt_target:
322 bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
323 alt_target = '%s.%s' % (alt_target, pn)
324
325 if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
326 continue
327
328 # Add file provide
329 trans_target = oe.package.file_translate(alt_target)
330 d.appendVar('FILERPROVIDES:%s:%s' % (trans_target, pkg), " " + alt_link)
331 if trans_target not in (d.getVar('FILERPROVIDESFLIST:%s' % pkg) or ""):
332 d.appendVar('FILERPROVIDESFLIST:%s' % pkg, " " + trans_target)
333
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
deleted file mode 100644
index cb2aaba57c..0000000000
--- a/meta/classes/update-rc.d.bbclass
+++ /dev/null
@@ -1,129 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7UPDATERCPN ?= "${PN}"
8
9DEPENDS:append:class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
10
11UPDATERCD = "update-rc.d"
12UPDATERCD:class-cross = ""
13UPDATERCD:class-native = ""
14UPDATERCD:class-nativesdk = ""
15
16INITSCRIPT_PARAMS ?= "defaults"
17
18INIT_D_DIR = "${sysconfdir}/init.d"
19
20def use_updatercd(d):
21 # If the distro supports both sysvinit and systemd, and the current recipe
22 # supports systemd, only call update-rc.d on rootfs creation or if systemd
23 # is not running. That's because systemctl enable/disable will already call
24 # update-rc.d if it detects initscripts.
25 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and bb.data.inherits_class('systemd', d):
26 return '[ -n "$D" -o ! -d /run/systemd/system ]'
27 return 'true'
28
29PACKAGE_WRITE_DEPS += "update-rc.d-native"
30
31updatercd_postinst() {
32if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
33 if [ -n "$D" ]; then
34 OPT="-r $D"
35 else
36 OPT="-s"
37 fi
38 update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
39fi
40}
41
42updatercd_prerm() {
43if ${@use_updatercd(d)} && [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
44 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
45fi
46}
47
48updatercd_postrm() {
49if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
50 if [ -n "$D" ]; then
51 OPT="-f -r $D"
52 else
53 OPT="-f"
54 fi
55 update-rc.d $OPT ${INITSCRIPT_NAME} remove
56fi
57}
58
59
60def update_rc_after_parse(d):
61 if d.getVar('INITSCRIPT_PACKAGES', False) == None:
62 if d.getVar('INITSCRIPT_NAME', False) == None:
63 bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
64 if d.getVar('INITSCRIPT_PARAMS', False) == None:
65 bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
66
67python __anonymous() {
68 update_rc_after_parse(d)
69}
70
71PACKAGESPLITFUNCS:prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
72PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd "
73
74populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
75populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
76
77python populate_packages_updatercd () {
78 def update_rcd_auto_depend(pkg):
79 import subprocess
80 import os
81 path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}")
82 if not os.path.exists(path):
83 return
84 statement = "grep -q -w '/etc/init.d/functions' %s" % path
85 if subprocess.call(statement, shell=True) == 0:
86 mlprefix = d.getVar('MLPREFIX') or ""
87 d.appendVar('RDEPENDS:' + pkg, ' %sinitd-functions' % (mlprefix))
88
89 def update_rcd_package(pkg):
90 bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
91
92 localdata = bb.data.createCopy(d)
93 overrides = localdata.getVar("OVERRIDES")
94 localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
95
96 update_rcd_auto_depend(pkg)
97
98 postinst = d.getVar('pkg_postinst:%s' % pkg)
99 if not postinst:
100 postinst = '#!/bin/sh\n'
101 postinst += localdata.getVar('updatercd_postinst')
102 d.setVar('pkg_postinst:%s' % pkg, postinst)
103
104 prerm = d.getVar('pkg_prerm:%s' % pkg)
105 if not prerm:
106 prerm = '#!/bin/sh\n'
107 prerm += localdata.getVar('updatercd_prerm')
108 d.setVar('pkg_prerm:%s' % pkg, prerm)
109
110 postrm = d.getVar('pkg_postrm:%s' % pkg)
111 if not postrm:
112 postrm = '#!/bin/sh\n'
113 postrm += localdata.getVar('updatercd_postrm')
114 d.setVar('pkg_postrm:%s' % pkg, postrm)
115
116 d.appendVar('RRECOMMENDS:' + pkg, " ${MLPREFIX}${UPDATERCD}")
117
118 # Check that this class isn't being inhibited (generally, by
119 # systemd.bbclass) before doing any work.
120 if not d.getVar("INHIBIT_UPDATERCD_BBCLASS"):
121 pkgs = d.getVar('INITSCRIPT_PACKAGES')
122 if pkgs == None:
123 pkgs = d.getVar('UPDATERCPN')
124 packages = (d.getVar('PACKAGES') or "").split()
125 if pkgs not in packages and packages != []:
126 pkgs = packages[0]
127 for pkg in pkgs.split():
128 update_rcd_package(pkg)
129}
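# As a usage sketch (a hypothetical recipe fragment; the script name and
# parameters are illustrative):
#
#   inherit update-rc.d
#   INITSCRIPT_NAME = "mydaemon"
#   INITSCRIPT_PARAMS = "start 99 5 2 . stop 20 0 1 6 ."
#
# The generated postinst then registers the script with those parameters and
# the postrm removes it again.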
diff --git a/meta/classes/upstream-version-is-even.bbclass b/meta/classes/upstream-version-is-even.bbclass
deleted file mode 100644
index 19587cb12c..0000000000
--- a/meta/classes/upstream-version-is-even.bbclass
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class ensures that the upstream version check only
8# accepts even minor versions (e.g. 3.0.x, 3.2.x, 3.4.x)
9# This scheme is used by GNOME and a number of other projects
10# to signify stable releases vs. development releases.
11UPSTREAM_CHECK_REGEX = "[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar"
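# For instance, this regex would match "foo-3.36.2.tar.xz" (minor version 36
# is even) but not "foo-3.37.1.tar.xz" (37 is odd), so only stable series are
# reported as upgrade candidates.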
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
deleted file mode 100644
index ae2da330b8..0000000000
--- a/meta/classes/utility-tasks.bbclass
+++ /dev/null
@@ -1,60 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7addtask listtasks
8do_listtasks[nostamp] = "1"
9python do_listtasks() {
10 taskdescs = {}
11 maxlen = 0
12 for e in d.keys():
13 if d.getVarFlag(e, 'task'):
14 maxlen = max(maxlen, len(e))
15 if e.endswith('_setscene'):
16 desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
17 else:
18 desc = d.getVarFlag(e, 'doc') or ''
19 taskdescs[e] = desc
20
21 tasks = sorted(taskdescs.keys())
22 for taskname in tasks:
23 bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
24}
25
26CLEANFUNCS ?= ""
27
28T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
29addtask clean
30do_clean[nostamp] = "1"
31python do_clean() {
32 """clear the build and temp directories"""
33 dir = d.expand("${WORKDIR}")
34 bb.note("Removing " + dir)
35 oe.path.remove(dir)
36
37 dir = "%s.*" % d.getVar('STAMP')
38 bb.note("Removing " + dir)
39 oe.path.remove(dir)
40
41 for f in (d.getVar('CLEANFUNCS') or '').split():
42 bb.build.exec_func(f, d)
43}
44
45addtask checkuri
46do_checkuri[nostamp] = "1"
47do_checkuri[network] = "1"
48python do_checkuri() {
49 src_uri = (d.getVar('SRC_URI') or "").split()
50 if len(src_uri) == 0:
51 return
52
53 try:
54 fetcher = bb.fetch2.Fetch(src_uri, d)
55 fetcher.checkstatus()
56 except bb.fetch2.BBFetchException as e:
57 bb.fatal(str(e))
58}
59
60
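# These tasks are driven from the command line in the usual way, e.g.:
#
#   bitbake busybox -c listtasks
#   bitbake busybox -c clean
#   bitbake busybox -c checkuri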
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
deleted file mode 100644
index 8d797ff126..0000000000
--- a/meta/classes/utils.bbclass
+++ /dev/null
@@ -1,369 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7oe_soinstall() {
8 # Purpose: Install a shared library file and
9 # create the necessary links
10 # Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
11 libname=`basename $1`
12 case "$libname" in
13 *.so)
14 bbfatal "oe_soinstall: Shared library must haved versioned filename (e.g. libfoo.so.1.2.3)"
15 ;;
16 esac
17 install -m 755 $1 $2/$libname
18 sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
19 if [ -z "$sonamelink" ]; then
20 bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
21 fi
22 solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
23 ln -sf $libname $2/$sonamelink
24 ln -sf $libname $2/$solink
25}
26
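# A usage sketch inside a recipe's do_install (the library name is
# hypothetical):
#
#   do_install() {
#       install -d ${D}${libdir}
#       oe_soinstall ${B}/libfoo.so.1.2.3 ${D}${libdir}
#   }
#
# This installs the library and creates the SONAME and libfoo.so symlinks
# next to it.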
27oe_libinstall() {
28 # Purpose: Install a library, in all its forms
29 # Example
30 #
31 # oe_libinstall libltdl ${STAGING_LIBDIR}/
32 # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
33 dir=""
34 libtool=""
35 silent=""
36 require_static=""
37 require_shared=""
38 while [ "$#" -gt 0 ]; do
39 case "$1" in
40 -C)
41 shift
42 dir="$1"
43 ;;
44 -s)
45 silent=1
46 ;;
47 -a)
48 require_static=1
49 ;;
50 -so)
51 require_shared=1
52 ;;
53 -*)
54 bbfatal "oe_libinstall: unknown option: $1"
55 ;;
56 *)
57 break;
58 ;;
59 esac
60 shift
61 done
62
63 libname="$1"
64 shift
65 destpath="$1"
66 if [ -z "$destpath" ]; then
67 bbfatal "oe_libinstall: no destination path specified"
68 fi
69
70 __runcmd () {
71 if [ -z "$silent" ]; then
72 echo >&2 "oe_libinstall: $*"
73 fi
74 $*
75 }
76
77 if [ -z "$dir" ]; then
78 dir=`pwd`
79 fi
80
81 dotlai=$libname.lai
82
83 # Sanity check that the libname.lai is unique
84 number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
85 if [ $number_of_files -gt 1 ]; then
86 bbfatal "oe_libinstall: $dotlai is not unique in $dir"
87 fi
88
89
90 dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
91 olddir=`pwd`
92 __runcmd cd $dir
93
94 lafile=$libname.la
95
96 # If no such file exists, try cutting the version suffix
97 if [ ! -f "$lafile" ]; then
98 libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
99 lafile1=$libname1.la
100 if [ -f "$lafile1" ]; then
101 libname=$libname1
102 lafile=$lafile1
103 fi
104 fi
105
106 if [ -f "$lafile" ]; then
107 # libtool archive
108 eval `cat $lafile|grep "^library_names="`
109 libtool=1
110 else
111 library_names="$libname.so* $libname.dll.a $libname.*.dylib"
112 fi
113
114 __runcmd install -d $destpath/
115 dota=$libname.a
116 if [ -f "$dota" -o -n "$require_static" ]; then
117 rm -f $destpath/$dota
118 __runcmd install -m 0644 $dota $destpath/
119 fi
120 if [ -f "$dotlai" -a -n "$libtool" ]; then
121 rm -f $destpath/$libname.la
122 __runcmd install -m 0644 $dotlai $destpath/$libname.la
123 fi
124
125 for name in $library_names; do
126 files=`eval echo $name`
127 for f in $files; do
128 if [ ! -e "$f" ]; then
129 if [ -n "$libtool" ]; then
130 bbfatal "oe_libinstall: $dir/$f not found."
131 fi
132 elif [ -L "$f" ]; then
133 __runcmd cp -P "$f" $destpath/
134 elif [ ! -L "$f" ]; then
135 libfile="$f"
136 rm -f $destpath/$libfile
137 __runcmd install -m 0755 $libfile $destpath/
138 fi
139 done
140 done
141
142 if [ -z "$libfile" ]; then
143 if [ -n "$require_shared" ]; then
144 bbfatal "oe_libinstall: unable to locate shared library"
145 fi
146 elif [ -z "$libtool" ]; then
147 # special case hack for non-libtool .so.#.#.# links
148 baselibfile=`basename "$libfile"`
149 if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
150 sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
151 solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
152 if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
153 __runcmd ln -sf $baselibfile $destpath/$sonamelink
154 fi
155 __runcmd ln -sf $baselibfile $destpath/$solink
156 fi
157 fi
158
159 __runcmd cd "$olddir"
160}
161
162create_cmdline_wrapper () {
163 # Create a wrapper script where commandline options are needed
164 #
165 # These are useful to work around relocation issues, by passing extra options
166 # to a program
167 #
168 # Usage: create_cmdline_wrapper FILENAME <extra-options>
169
170 cmd=$1
171 shift
172
173 echo "Generating wrapper script for $cmd"
174
175 mv $cmd $cmd.real
176 cmdname=`basename $cmd`
177 dirname=`dirname $cmd`
178 cmdoptions=$@
179 if [ "${base_prefix}" != "" ]; then
180 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
181 cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
182 fi
183 cat <<END >$cmd
184#!/bin/bash
185realpath=\`readlink -fn \$0\`
186realdir=\`dirname \$realpath\`
187exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
188END
189 chmod +x $cmd
190}
191
192create_cmdline_shebang_wrapper () {
193 # Create a wrapper script where command-line options are needed
194 #
195 # These are useful to work around shebang relocation issues, where shebangs are too
196 # long or have arguments in them, thus preventing them from using the /usr/bin/env
197 # shebang.
198 #
199 # Usage: create_cmdline_shebang_wrapper FILENAME <extra-options>
200
201 cmd=$1
202 shift
203
204 echo "Generating wrapper script for $cmd"
205
206 # Strip #! and get remaining interpreter + arg
207 argument="$(sed -ne 's/^#! *//p;q' $cmd)"
208 # strip the shebang from the real script as we do not want it to be usable anyway
209 tail -n +2 $cmd > $cmd.real
210 chown --reference=$cmd $cmd.real
211 chmod --reference=$cmd $cmd.real
212 rm -f $cmd
213 cmdname=$(basename $cmd)
214 dirname=$(dirname $cmd)
215 cmdoptions=$@
216 if [ "${base_prefix}" != "" ]; then
217 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
218 cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
219 fi
220 cat <<END >$cmd
221#!/usr/bin/env bash
222realpath=\`readlink -fn \$0\`
223realdir=\`dirname \$realpath\`
224exec -a \$realdir/$cmdname $argument \$realdir/$cmdname.real $cmdoptions "\$@"
225END
226 chmod +x $cmd
227}
228
229create_wrapper () {
230 # Create a wrapper script where extra environment variables are needed
231 #
232 # These are useful to work around relocation issues, by setting environment
233 # variables which point to paths in the filesystem.
234 #
235 # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
236
237 cmd=$1
238 shift
239
240 echo "Generating wrapper script for $cmd"
241
242 mv $cmd $cmd.real
243 cmdname=`basename $cmd`
244 dirname=`dirname $cmd`
245 exportstring=$@
246 if [ "${base_prefix}" != "" ]; then
247 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
248 exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
249 fi
250 cat <<END >$cmd
251#!/bin/bash
252realpath=\`readlink -fn \$0\`
253realdir=\`dirname \$realpath\`
254export $exportstring
255exec -a "\$0" \$realdir/$cmdname.real "\$@"
256END
257 chmod +x $cmd
258}
259
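# For example, a recipe could wrap a binary that needs an environment hint
# (FOO_DATADIR here is a hypothetical variable):
#
#   create_wrapper ${D}${bindir}/foo FOO_DATADIR=${datadir}/foo
#
# This renames foo to foo.real and generates a foo script that exports
# FOO_DATADIR before exec'ing the real binary.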
260# Copy files/directories from $1 to $2 but using hardlinks
261# (preserving symlinks)
262hardlinkdir () {
263 from=$1
264 to=$2
265 (cd $from; find . -print0 | cpio --null -pdlu $to)
266}
267
268
269def check_app_exists(app, d):
270 app = d.expand(app).split()[0].strip()
271 path = d.getVar('PATH')
272 return bool(bb.utils.which(path, app))
273
274def explode_deps(s):
275 return bb.utils.explode_deps(s)
276
277def base_set_filespath(path, d):
278 filespath = []
279 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
280 # Remove default flag which was used for checking
281 extrapaths = extrapaths.replace("__default:", "")
282 # Don't prepend empty strings to the path list
283 if extrapaths != "":
284 path = extrapaths.split(":") + path
285 # The ":" ensures we have an 'empty' override
286 overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
287 overrides.reverse()
288 for o in overrides:
289 for p in path:
290 if p != "":
291 filespath.append(os.path.join(p, o))
292 return ":".join(filespath)
293
294def extend_variants(d, var, extend, delim=':'):
295 """Return a string of all bb class extend variants for the given extend"""
296 variants = []
297 whole = d.getVar(var) or ""
298 for ext in whole.split():
299 eext = ext.split(delim)
300 if len(eext) > 1 and eext[0] == extend:
301 variants.append(eext[1])
302 return " ".join(variants)
303
304def multilib_pkg_extend(d, pkg):
305 variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
306 if not variants:
307 return pkg
308 pkgs = pkg
309 for v in variants:
310 pkgs = pkgs + " " + v + "-" + pkg
311 return pkgs
312
313def get_multilib_datastore(variant, d):
314 return oe.utils.get_multilib_datastore(variant, d)
315
316def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
317 """Return a string of all ${var} in all multilib tune configuration"""
318 values = []
319 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
320 for item in variants:
321 localdata = get_multilib_datastore(item, d)
322 # We need WORKDIR to be consistent with the original datastore
323 localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
324 value = localdata.getVar(var) or ""
325 if value != "":
326 if need_split:
327 for item in value.split(delim):
328 values.append(item)
329 else:
330 values.append(value)
331 if unique:
333 # We do this to keep the order as stable as possible
333 ret = []
334 for value in values:
335 if value not in ret:
336 ret.append(value)
337 else:
338 ret = values
339 return " ".join(ret)
340
341def all_multilib_tune_list(vars, d):
342 """
343 Return a list of ${VAR} for each variable VAR in vars from each
344 multilib tune configuration.
345 It is safe to call from a multilib recipe/context as it can
346 figure out the original tune and remove the multilib overrides.
347 """
348 values = {}
349 for v in vars:
350 values[v] = []
351 values['ml'] = ['']
352
353 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
354 for item in variants:
355 localdata = get_multilib_datastore(item, d)
356 for v in vars: values[v].append(localdata.getVar(v))
357 values['ml'].append(item)
358 return values
359all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
360
361# If the user hasn't set up their name/email, set some defaults
362check_git_config() {
363 if ! git config user.email > /dev/null ; then
364 git config --local user.email "${PATCH_GIT_USER_EMAIL}"
365 fi
366 if ! git config user.name > /dev/null ; then
367 git config --local user.name "${PATCH_GIT_USER_NAME}"
368 fi
369}
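# base_set_filespath() is what backs the common bbappend idiom of extending
# the file search path (a standard usage example):
#
#   FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
#
# The extra paths are combined with FILESOVERRIDES to build the final
# FILESPATH search list.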
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
deleted file mode 100644
index 460ddb36f0..0000000000
--- a/meta/classes/vala.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Everyone needs vala-native and targets need vala, too,
8# because that is where target builds look for .vapi files.
9#
10VALADEPENDS = ""
11VALADEPENDS:class-target = "vala"
12DEPENDS:append = " vala-native ${VALADEPENDS}"
13
14# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
15export STAGING_DATADIR
16# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
17export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
18
19# Package additional files
20FILES:${PN}-dev += "\
21 ${datadir}/vala/vapi/*.vapi \
22 ${datadir}/vala/vapi/*.deps \
23 ${datadir}/gir-1.0 \
24"
25
26# Remove vapigen.m4 that is bundled with tarballs
27# because it does not yet have our cross-compile fixes
28do_configure:prepend() {
29 rm -f ${S}/m4/vapigen.m4
30}
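# Usage is simply (an illustrative recipe fragment):
#
#   inherit vala
#
# which pulls in vala-native (and vala for target builds) and packages the
# installed .vapi files into ${PN}-dev.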
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
deleted file mode 100644
index 5fa0cc4987..0000000000
--- a/meta/classes/waf.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# avoids build breaks when using no-static-libs.inc
8DISABLE_STATIC = ""
9
10# Which Python interpreter to use. Defaults to Python 3 but can be
11# overridden if required.
12WAF_PYTHON ?= "python3"
13
14B = "${WORKDIR}/build"
15do_configure[cleandirs] += "${B}"
16
17EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
18
19EXTRA_OEWAF_BUILD ??= ""
20# In most cases, you want to pass the same arguments to `waf build` and `waf
21# install`, but you can override it if necessary
22EXTRA_OEWAF_INSTALL ??= "${EXTRA_OEWAF_BUILD}"
23
24def waflock_hash(d):
25 # Calculates the hash used for the waf lock file. This should include
26 # all of the user controllable inputs passed to waf configure. Note
27 # that the full paths for ${B} and ${S} are used; this is OK and desired
28 # because a change to either of these should create a unique lock file
29 # to prevent collisions.
30 import hashlib
31 h = hashlib.sha512()
32 def update(name):
33 val = d.getVar(name)
34 if val is not None:
35 h.update(val.encode('utf-8'))
36 update('S')
37 update('B')
38 update('prefix')
39 update('EXTRA_OECONF')
40 return h.hexdigest()
41
42# Use WAFLOCK to specify a separate lock file. The build is already
43# sufficiently isolated by setting the output directory; this ensures that
44# bitbake won't step on the toes of any other configured context in the source
45# directory (e.g. if the source is coming from externalsrc and was previously
46# configured elsewhere).
47export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
48BB_BASEHASH_IGNORE_VARS += "WAFLOCK"
49
50python waf_preconfigure() {
51 import subprocess
52 subsrcdir = d.getVar('S')
53 python = d.getVar('WAF_PYTHON')
54 wafbin = os.path.join(subsrcdir, 'waf')
55 try:
56 result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
57 version = result.decode('utf-8').split()[1]
58 if bb.utils.vercmp_string_op(version, "1.8.7", ">="):
59 d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
60 except subprocess.CalledProcessError as e:
61 bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
62 except FileNotFoundError:
63 bb.fatal("waf does not exist in %s" % subsrcdir)
64}
65
66do_configure[prefuncs] += "waf_preconfigure"
67
68waf_do_configure() {
69 (cd ${S} && ${WAF_PYTHON} ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
70}
71
72do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
73waf_do_compile() {
74 (cd ${S} && ${WAF_PYTHON} ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)} ${EXTRA_OEWAF_BUILD})
75}
76
77waf_do_install() {
78 (cd ${S} && ${WAF_PYTHON} ./waf install --destdir=${D} ${EXTRA_OEWAF_INSTALL})
79}
80
81EXPORT_FUNCTIONS do_configure do_compile do_install
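# A minimal consumer recipe might look like this (the extra configure flag is
# hypothetical):
#
#   inherit waf
#   EXTRA_OECONF += "--disable-docs"
#
# The exported tasks then run "waf configure", "waf build" and "waf install"
# with the arguments assembled above.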
diff --git a/meta/classes/xmlcatalog.bbclass b/meta/classes/xmlcatalog.bbclass
deleted file mode 100644
index 5826d0a8b5..0000000000
--- a/meta/classes/xmlcatalog.bbclass
+++ /dev/null
@@ -1,32 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS = "libxml2-native"
8
9# A whitespace-separated list of XML catalogs to be registered, for example
10# "${sysconfdir}/xml/docbook-xml.xml".
11XMLCATALOGS ?= ""
12
13SYSROOT_PREPROCESS_FUNCS:append = " xmlcatalog_sstate_postinst"
14
15xmlcatalog_complete() {
16 ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
17 if [ ! -f $ROOTCATALOG ]; then
18 mkdir --parents $(dirname $ROOTCATALOG)
19 xmlcatalog --noout --create $ROOTCATALOG
20 fi
21 for CATALOG in ${XMLCATALOGS}; do
22 xmlcatalog --noout --add nextCatalog unused file://$CATALOG $ROOTCATALOG
23 done
24}
25
26xmlcatalog_sstate_postinst() {
27 mkdir -p ${SYSROOT_DESTDIR}${bindir}
28 dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog
29 echo '#!/bin/sh' > $dest
30 echo '${xmlcatalog_complete}' >> $dest
31 chmod 0755 $dest
32}
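# An example consumer (a sketch following the docbook-xml pattern; the
# catalog path matches the one named above):
#
#   inherit xmlcatalog
#   XMLCATALOGS = "${sysconfdir}/xml/docbook-xml.xml"
#
# Each listed catalog is registered as a nextCatalog entry in the native
# sysroot's root XML catalog.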