Diffstat (limited to 'meta/classes-recipe')
-rw-r--r-- meta/classes-recipe/allarch.bbclass | 71
-rw-r--r-- meta/classes-recipe/autotools-brokensep.bbclass | 11
-rw-r--r-- meta/classes-recipe/autotools.bbclass | 260
-rw-r--r-- meta/classes-recipe/baremetal-image.bbclass | 128
-rw-r--r-- meta/classes-recipe/bash-completion.bbclass | 13
-rw-r--r-- meta/classes-recipe/bin_package.bbclass | 42
-rw-r--r-- meta/classes-recipe/binconfig-disabled.bbclass | 36
-rw-r--r-- meta/classes-recipe/binconfig.bbclass | 60
-rw-r--r-- meta/classes-recipe/cargo.bbclass | 97
-rw-r--r-- meta/classes-recipe/cargo_common.bbclass | 139
-rw-r--r-- meta/classes-recipe/cmake.bbclass | 223
-rw-r--r-- meta/classes-recipe/cml1.bbclass | 107
-rw-r--r-- meta/classes-recipe/compress_doc.bbclass | 269
-rw-r--r-- meta/classes-recipe/core-image.bbclass | 81
-rw-r--r-- meta/classes-recipe/cpan-base.bbclass | 33
-rw-r--r-- meta/classes-recipe/cpan.bbclass | 71
-rw-r--r-- meta/classes-recipe/cpan_build.bbclass | 47
-rw-r--r-- meta/classes-recipe/cross-canadian.bbclass | 200
-rw-r--r-- meta/classes-recipe/cross.bbclass | 103
-rw-r--r-- meta/classes-recipe/crosssdk.bbclass | 57
-rw-r--r-- meta/classes-recipe/deploy.bbclass | 18
-rw-r--r-- meta/classes-recipe/devicetree.bbclass | 154
-rw-r--r-- meta/classes-recipe/devupstream.bbclass | 61
-rw-r--r-- meta/classes-recipe/distro_features_check.bbclass | 13
-rw-r--r-- meta/classes-recipe/distrooverrides.bbclass | 38
-rw-r--r-- meta/classes-recipe/dos2unix.bbclass | 20
-rw-r--r-- meta/classes-recipe/externalsrc.bbclass | 269
-rw-r--r-- meta/classes-recipe/features_check.bbclass | 57
-rw-r--r-- meta/classes-recipe/fontcache.bbclass | 63
-rw-r--r-- meta/classes-recipe/fs-uuid.bbclass | 30
-rw-r--r-- meta/classes-recipe/gconf.bbclass | 77
-rw-r--r-- meta/classes-recipe/gettext.bbclass | 28
-rw-r--r-- meta/classes-recipe/gi-docgen.bbclass | 30
-rw-r--r-- meta/classes-recipe/gio-module-cache.bbclass | 44
-rw-r--r-- meta/classes-recipe/glide.bbclass | 15
-rw-r--r-- meta/classes-recipe/gnomebase.bbclass | 37
-rw-r--r-- meta/classes-recipe/go-mod.bbclass | 26
-rw-r--r-- meta/classes-recipe/go-ptest.bbclass | 60
-rw-r--r-- meta/classes-recipe/go.bbclass | 170
-rw-r--r-- meta/classes-recipe/goarch.bbclass | 122
-rw-r--r-- meta/classes-recipe/gobject-introspection-data.bbclass | 18
-rw-r--r-- meta/classes-recipe/gobject-introspection.bbclass | 61
-rw-r--r-- meta/classes-recipe/grub-efi-cfg.bbclass | 122
-rw-r--r-- meta/classes-recipe/grub-efi.bbclass | 14
-rw-r--r-- meta/classes-recipe/gsettings.bbclass | 48
-rw-r--r-- meta/classes-recipe/gtk-doc.bbclass | 89
-rw-r--r-- meta/classes-recipe/gtk-icon-cache.bbclass | 95
-rw-r--r-- meta/classes-recipe/gtk-immodules-cache.bbclass | 82
-rw-r--r-- meta/classes-recipe/image-artifact-names.bbclass | 28
-rw-r--r-- meta/classes-recipe/image-combined-dbg.bbclass | 15
-rw-r--r-- meta/classes-recipe/image-container.bbclass | 27
-rw-r--r-- meta/classes-recipe/image-live.bbclass | 265
-rw-r--r-- meta/classes-recipe/image-postinst-intercepts.bbclass | 29
-rw-r--r-- meta/classes-recipe/image.bbclass | 684
-rw-r--r-- meta/classes-recipe/image_types.bbclass | 355
-rw-r--r-- meta/classes-recipe/image_types_wic.bbclass | 190
-rw-r--r-- meta/classes-recipe/kernel-arch.bbclass | 74
-rw-r--r-- meta/classes-recipe/kernel-artifact-names.bbclass | 37
-rw-r--r-- meta/classes-recipe/kernel-devicetree.bbclass | 119
-rw-r--r-- meta/classes-recipe/kernel-fitimage.bbclass | 803
-rw-r--r-- meta/classes-recipe/kernel-grub.bbclass | 111
-rw-r--r-- meta/classes-recipe/kernel-module-split.bbclass | 197
-rw-r--r-- meta/classes-recipe/kernel-uboot.bbclass | 49
-rw-r--r-- meta/classes-recipe/kernel-uimage.bbclass | 41
-rw-r--r-- meta/classes-recipe/kernel-yocto.bbclass | 732
-rw-r--r-- meta/classes-recipe/kernel.bbclass | 821
-rw-r--r-- meta/classes-recipe/kernelsrc.bbclass | 16
-rw-r--r-- meta/classes-recipe/lib_package.bbclass | 12
-rw-r--r-- meta/classes-recipe/libc-package.bbclass | 390
-rw-r--r-- meta/classes-recipe/license_image.bbclass | 295
-rw-r--r-- meta/classes-recipe/linux-dummy.bbclass | 31
-rw-r--r-- meta/classes-recipe/linux-kernel-base.bbclass | 47
-rw-r--r-- meta/classes-recipe/linuxloader.bbclass | 82
-rw-r--r-- meta/classes-recipe/live-vm-common.bbclass | 100
-rw-r--r-- meta/classes-recipe/manpages.bbclass | 51
-rw-r--r-- meta/classes-recipe/meson-routines.bbclass | 57
-rw-r--r-- meta/classes-recipe/meson.bbclass | 179
-rw-r--r-- meta/classes-recipe/mime-xdg.bbclass | 78
-rw-r--r-- meta/classes-recipe/mime.bbclass | 76
-rw-r--r-- meta/classes-recipe/module-base.bbclass | 27
-rw-r--r-- meta/classes-recipe/module.bbclass | 80
-rw-r--r-- meta/classes-recipe/multilib_header.bbclass | 58
-rw-r--r-- meta/classes-recipe/multilib_script.bbclass | 40
-rw-r--r-- meta/classes-recipe/native.bbclass | 236
-rw-r--r-- meta/classes-recipe/nativesdk.bbclass | 124
-rw-r--r-- meta/classes-recipe/nopackages.bbclass | 19
-rw-r--r-- meta/classes-recipe/npm.bbclass | 340
-rw-r--r-- meta/classes-recipe/packagegroup.bbclass | 67
-rw-r--r-- meta/classes-recipe/perl-version.bbclass | 72
-rw-r--r-- meta/classes-recipe/perlnative.bbclass | 9
-rw-r--r-- meta/classes-recipe/pixbufcache.bbclass | 69
-rw-r--r-- meta/classes-recipe/pkgconfig.bbclass | 8
-rw-r--r-- meta/classes-recipe/populate_sdk.bbclass | 13
-rw-r--r-- meta/classes-recipe/populate_sdk_base.bbclass | 384
-rw-r--r-- meta/classes-recipe/populate_sdk_ext.bbclass | 842
-rw-r--r-- meta/classes-recipe/ptest-gnome.bbclass | 14
-rw-r--r-- meta/classes-recipe/ptest-perl.bbclass | 36
-rw-r--r-- meta/classes-recipe/ptest.bbclass | 142
-rw-r--r-- meta/classes-recipe/pypi.bbclass | 34
-rw-r--r-- meta/classes-recipe/python3-dir.bbclass | 11
-rw-r--r-- meta/classes-recipe/python3native.bbclass | 30
-rw-r--r-- meta/classes-recipe/python3targetconfig.bbclass | 35
-rw-r--r-- meta/classes-recipe/python_flit_core.bbclass | 14
-rw-r--r-- meta/classes-recipe/python_hatchling.bbclass | 9
-rw-r--r-- meta/classes-recipe/python_pep517.bbclass | 60
-rw-r--r-- meta/classes-recipe/python_poetry_core.bbclass | 9
-rw-r--r-- meta/classes-recipe/python_pyo3.bbclass | 36
-rw-r--r-- meta/classes-recipe/python_setuptools3_rust.bbclass | 17
-rw-r--r-- meta/classes-recipe/python_setuptools_build_meta.bbclass | 9
-rw-r--r-- meta/classes-recipe/qemu.bbclass | 77
-rw-r--r-- meta/classes-recipe/qemuboot.bbclass | 171
-rw-r--r-- meta/classes-recipe/rootfs-postcommands.bbclass | 440
-rw-r--r-- meta/classes-recipe/rootfs_deb.bbclass | 41
-rw-r--r-- meta/classes-recipe/rootfs_ipk.bbclass | 44
-rw-r--r-- meta/classes-recipe/rootfs_rpm.bbclass | 45
-rw-r--r-- meta/classes-recipe/rootfsdebugfiles.bbclass | 47
-rw-r--r-- meta/classes-recipe/rust-bin.bbclass | 154
-rw-r--r-- meta/classes-recipe/rust-common.bbclass | 177
-rw-r--r-- meta/classes-recipe/rust-target-config.bbclass | 391
-rw-r--r-- meta/classes-recipe/rust.bbclass | 51
-rw-r--r-- meta/classes-recipe/scons.bbclass | 34
-rw-r--r-- meta/classes-recipe/setuptools3-base.bbclass | 37
-rw-r--r-- meta/classes-recipe/setuptools3.bbclass | 38
-rw-r--r-- meta/classes-recipe/setuptools3_legacy.bbclass | 84
-rw-r--r-- meta/classes-recipe/siteinfo.bbclass | 232
-rw-r--r-- meta/classes-recipe/syslinux.bbclass | 194
-rw-r--r-- meta/classes-recipe/systemd-boot-cfg.bbclass | 77
-rw-r--r-- meta/classes-recipe/systemd-boot.bbclass | 35
-rw-r--r-- meta/classes-recipe/systemd.bbclass | 239
-rw-r--r-- meta/classes-recipe/testimage.bbclass | 508
-rw-r--r-- meta/classes-recipe/testsdk.bbclass | 52
-rw-r--r-- meta/classes-recipe/texinfo.bbclass | 24
-rw-r--r-- meta/classes-recipe/toolchain-scripts-base.bbclass | 17
-rw-r--r-- meta/classes-recipe/toolchain-scripts.bbclass | 236
-rw-r--r-- meta/classes-recipe/uboot-config.bbclass | 133
-rw-r--r-- meta/classes-recipe/uboot-extlinux-config.bbclass | 158
-rw-r--r-- meta/classes-recipe/uboot-sign.bbclass | 505
-rw-r--r-- meta/classes-recipe/update-alternatives.bbclass | 333
-rw-r--r-- meta/classes-recipe/update-rc.d.bbclass | 129
-rw-r--r-- meta/classes-recipe/upstream-version-is-even.bbclass | 11
-rw-r--r-- meta/classes-recipe/vala.bbclass | 30
-rw-r--r-- meta/classes-recipe/waf.bbclass | 81
-rw-r--r-- meta/classes-recipe/xmlcatalog.bbclass | 32
143 files changed, 17601 insertions(+), 0 deletions(-)
diff --git a/meta/classes-recipe/allarch.bbclass b/meta/classes-recipe/allarch.bbclass
new file mode 100644
index 0000000000..9138f40ed8
--- /dev/null
+++ b/meta/classes-recipe/allarch.bbclass
@@ -0,0 +1,71 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+#
+# This class is used for architecture independent recipes/data files (usually scripts)
+#
+
+python allarch_package_arch_handler () {
+    if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
+        or bb.data.inherits_class("crosssdk", d):
+        return
+
+    variants = d.getVar("MULTILIB_VARIANTS")
+    if not variants:
+        d.setVar("PACKAGE_ARCH", "all" )
+}
+
+addhandler allarch_package_arch_handler
+allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
+
+python () {
+    # Allow this class to be included but overridden - only set
+    # the values if we're still "all" package arch.
+    if d.getVar("PACKAGE_ARCH") == "all":
+        # No need for virtual/libc or a cross compiler
+        d.setVar("INHIBIT_DEFAULT_DEPS","1")
+
+        # Set these to a common set of values; we shouldn't be using them other than for WORKDIR directory
+        # naming anyway
+        d.setVar("baselib", "lib")
+        d.setVar("TARGET_ARCH", "allarch")
+        d.setVar("TARGET_OS", "linux")
+        d.setVar("TARGET_CC_ARCH", "none")
+        d.setVar("TARGET_LD_ARCH", "none")
+        d.setVar("TARGET_AS_ARCH", "none")
+        d.setVar("TARGET_FPU", "")
+        d.setVar("TARGET_PREFIX", "")
+        # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
+        # (this removes any dependencies from the hash perspective)
+        d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
+        d.setVar("SDK_ARCH", "none")
+        d.setVar("SDK_CC_ARCH", "none")
+        d.setVar("TARGET_CPPFLAGS", "none")
+        d.setVar("TARGET_CFLAGS", "none")
+        d.setVar("TARGET_CXXFLAGS", "none")
+        d.setVar("TARGET_LDFLAGS", "none")
+        d.setVar("POPULATESYSROOTDEPS", "")
+
+        # Avoid this being unnecessarily different due to nuances of
+        # the target machine that aren't important for "all" arch
+        # packages.
+        d.setVar("LDFLAGS", "")
+
+        # No need to do shared library processing or debug symbol handling
+        d.setVar("EXCLUDE_FROM_SHLIBS", "1")
+        d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
+        d.setVar("INHIBIT_PACKAGE_STRIP", "1")
+
+        # These multilib values shouldn't change allarch packages so exclude them
+        d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
+        d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
+        d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
+    elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
+        bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
+}
+
+def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
+    return 'false'
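
A minimal sketch of a recipe using this class (recipe contents and script name are illustrative, not part of this commit; the checksum shown is the one oe-core uses for its bundled MIT license text):

    SUMMARY = "Example architecture-independent script package"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"

    SRC_URI = "file://myscript.sh"

    inherit allarch

    do_install() {
        # Nothing here is target-specific; install the script unmodified
        install -d ${D}${bindir}
        install -m 0755 ${WORKDIR}/myscript.sh ${D}${bindir}/myscript
    }

The event handler above then sets PACKAGE_ARCH = "all" for such a recipe unless multilib variants are configured, so a single package serves every machine.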
diff --git a/meta/classes-recipe/autotools-brokensep.bbclass b/meta/classes-recipe/autotools-brokensep.bbclass
new file mode 100644
index 0000000000..a0fb4b7b50
--- /dev/null
+++ b/meta/classes-recipe/autotools-brokensep.bbclass
@@ -0,0 +1,11 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Autotools class for recipes where separate build dir doesn't work
+# Ideally we should fix software so it does work. Standard autotools supports
+# this.
+inherit autotools
+B = "${S}"
diff --git a/meta/classes-recipe/autotools.bbclass b/meta/classes-recipe/autotools.bbclass
new file mode 100644
index 0000000000..a4c1c4be41
--- /dev/null
+++ b/meta/classes-recipe/autotools.bbclass
@@ -0,0 +1,260 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+def get_autotools_dep(d):
+    if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
+        return ''
+
+    pn = d.getVar('PN')
+    deps = ''
+
+    if pn in ['autoconf-native', 'automake-native']:
+        return deps
+    deps += 'autoconf-native automake-native '
+
+    if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
+        deps += 'libtool-native '
+        if not bb.data.inherits_class('native', d) \
+                and not bb.data.inherits_class('nativesdk', d) \
+                and not bb.data.inherits_class('cross', d) \
+                and not d.getVar('INHIBIT_DEFAULT_DEPS'):
+            deps += 'libtool-cross '
+
+    return deps
+
+
+DEPENDS:prepend = "${@get_autotools_dep(d)} "
+
+inherit siteinfo
+
+# Space separated list of shell scripts with variables defined to supply test
+# results for autoconf tests we cannot run at build time.
+# The value of this variable is filled in in a prefunc because it depends on
+# the contents of the sysroot.
+export CONFIG_SITE
+
+acpaths ?= "default"
+EXTRA_AUTORECONF = "--exclude=autopoint --exclude=gtkdocize"
+
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
+
+# When building tools for use at build-time it's recommended for the build
+# system to use these variables when cross-compiling.
+# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
+export CPP_FOR_BUILD = "${BUILD_CPP}"
+export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
+
+export CC_FOR_BUILD = "${BUILD_CC}"
+export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
+
+export CXX_FOR_BUILD = "${BUILD_CXX}"
+export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
+
+export LD_FOR_BUILD = "${BUILD_LD}"
+export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
+
+def append_libtool_sysroot(d):
+    # Only supply libtool sysroot option for non-native packages
+    if not bb.data.inherits_class('native', d):
+        return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
+    return ""
+
+CONFIGUREOPTS = " --build=${BUILD_SYS} \
+                  --host=${HOST_SYS} \
+                  --target=${TARGET_SYS} \
+                  --prefix=${prefix} \
+                  --exec_prefix=${exec_prefix} \
+                  --bindir=${bindir} \
+                  --sbindir=${sbindir} \
+                  --libexecdir=${libexecdir} \
+                  --datadir=${datadir} \
+                  --sysconfdir=${sysconfdir} \
+                  --sharedstatedir=${sharedstatedir} \
+                  --localstatedir=${localstatedir} \
+                  --libdir=${libdir} \
+                  --includedir=${includedir} \
+                  --oldincludedir=${oldincludedir} \
+                  --infodir=${infodir} \
+                  --mandir=${mandir} \
+                  --disable-silent-rules \
+                  ${CONFIGUREOPT_DEPTRACK} \
+                  ${@append_libtool_sysroot(d)}"
+CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
+
+CACHED_CONFIGUREVARS ?= ""
+
+AUTOTOOLS_SCRIPT_PATH ?= "${S}"
+CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
+
+AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
+
+oe_runconf () {
+	# Use relative path to avoid buildpaths in files
+	cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
+	cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
+	if [ -x "$cfgscript" ] ; then
+		bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
+		if ! CONFIG_SHELL=${CONFIG_SHELL-/bin/bash} ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
+			bbnote "The following config.log files may provide further information."
+			bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
+			bbfatal_log "configure failed"
+		fi
+	else
+		bbfatal "no configure script found at $cfgscript"
+	fi
+}
+
+CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
+
+autotools_preconfigure() {
+	if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
+		if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
+			if [ "${S}" != "${B}" ]; then
+				echo "Previously configured separate build directory detected, cleaning ${B}"
+				rm -rf ${B}
+				mkdir -p ${B}
+			else
+				# At least remove the .la files since automake won't automatically
+				# regenerate them even if CFLAGS/LDFLAGS are different
+				cd ${S}
+				if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
+					oe_runmake clean
+				fi
+				find ${S} -ignore_readdir_race -name \*.la -delete
+			fi
+		fi
+	fi
+}
+
+autotools_postconfigure(){
+	if [ -n "${CONFIGURESTAMPFILE}" ]; then
+		mkdir -p `dirname ${CONFIGURESTAMPFILE}`
+		echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+	fi
+}
+
+EXTRACONFFUNCS ??= ""
+
+EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
+
+do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
+do_compile[prefuncs] += "autotools_aclocals"
+do_install[prefuncs] += "autotools_aclocals"
+do_configure[postfuncs] += "autotools_postconfigure"
+
+ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
+ACLOCALEXTRAPATH = ""
+ACLOCALEXTRAPATH:class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH:class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+
+python autotools_aclocals () {
+    sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
+    d.setVar("CONFIG_SITE", " ".join(sitefiles))
+}
+
+do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
+
+CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
+
+autotools_do_configure() {
+	# WARNING: gross hack follows:
+	# An autotools built package generally needs these scripts, however only
+	# automake or libtoolize actually install the current versions of them.
+	# This is a problem in builds that do not use libtool or automake, in the case
+	# where we -need- the latest version of these scripts. e.g. running a build
+	# for a package whose autotools are old, on an x86_64 machine, which the old
+	# config.sub does not support. Work around this by installing them manually
+	# regardless.
+
+	PRUNE_M4=""
+
+	for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
+		rm -f `dirname $ac`/configure
+	done
+	if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
+		olddir=`pwd`
+		cd ${AUTOTOOLS_SCRIPT_PATH}
+		mkdir -p ${ACLOCALDIR}
+		ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
+		if [ x"${acpaths}" = xdefault ]; then
+			acpaths=
+			for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
+				grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
+				acpaths="$acpaths -I $i"
+			done
+		else
+			acpaths="${acpaths}"
+		fi
+		acpaths="$acpaths ${ACLOCALEXTRAPATH}"
+		AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
+		automake --version
+		echo "AUTOV is $AUTOV"
+		if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
+			ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
+		fi
+		# autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
+		# like it was auto-generated. Work around this by blowing it away
+		# by hand, unless the package specifically asked not to run aclocal.
+		if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
+			rm -f aclocal.m4
+		fi
+		if [ -e configure.in ]; then
+			CONFIGURE_AC=configure.in
+		else
+			CONFIGURE_AC=configure.ac
+		fi
+		if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
+			if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
+				: do nothing -- we still have an old unmodified configure.ac
+			else
+				bbnote Executing glib-gettextize --force --copy
+				echo "no" | glib-gettextize --force --copy
+			fi
+		elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
+			# We'd call gettextize here if it wasn't so broken...
+			cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
+			if [ -d ${S}/po/ ]; then
+				cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
+				if [ ! -e ${S}/po/remove-potcdate.sin ]; then
+					cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
+				fi
+			fi
+			PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
+		fi
+		mkdir -p m4
+
+		for i in $PRUNE_M4; do
+			find ${S} -ignore_readdir_race -name $i -delete
+		done
+
+		bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
+		ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
+		cd $olddir
+	fi
+	if [ -e ${CONFIGURE_SCRIPT} ]; then
+		oe_runconf
+	else
+		bbnote "nothing to configure"
+	fi
+}
+
+autotools_do_compile() {
+	oe_runmake
+}
+
+autotools_do_install() {
+	oe_runmake 'DESTDIR=${D}' install
+	# Info dir listing isn't interesting at this point so remove it if it exists.
+	if [ -e "${D}${infodir}/dir" ]; then
+		rm -f ${D}${infodir}/dir
+	fi
+}
+
+inherit siteconfig
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
+
+B = "${WORKDIR}/build"
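
A usage sketch for this class (the configure options and cache variable are illustrative, not part of this commit): a recipe inherits autotools and, where needed, extends the generated configure invocation or pre-seeds autoconf tests that cannot run when cross-compiling:

    inherit autotools

    # Appended after CONFIGUREOPTS when oe_runconf runs the configure script
    EXTRA_OECONF = "--disable-static --without-x"

    # Pre-seed an autoconf cache value for a test that would need to execute
    # a target binary and therefore cannot run at build time
    CACHED_CONFIGUREVARS = "ac_cv_func_malloc_0_nonnull=yes"

EXTRA_OECONF also picks up ${PACKAGECONFIG_CONFARGS}, so PACKAGECONFIG options flow into the same configure command line.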
diff --git a/meta/classes-recipe/baremetal-image.bbclass b/meta/classes-recipe/baremetal-image.bbclass
new file mode 100644
index 0000000000..3a979f2ed1
--- /dev/null
+++ b/meta/classes-recipe/baremetal-image.bbclass
@@ -0,0 +1,128 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Baremetal image class
+#
+# This class is meant to be inherited by recipes for baremetal/RTOS applications
+# It contains code that would be used by all of them, every recipe just needs to
+# override certain variables.
+#
+# For scalability purposes, code within this class focuses on the "image" wiring
+# to satisfy the OpenEmbedded image creation and testing infrastructure.
+#
+# See meta-skeleton for a working example.
+
+
+# Toolchain should be baremetal or newlib based.
+# TCLIBC="baremetal" or TCLIBC="newlib"
+COMPATIBLE_HOST:libc-musl:class-target = "null"
+COMPATIBLE_HOST:libc-glibc:class-target = "null"
+
+
+inherit rootfs-postcommands
+
+# Set some defaults, but these should be overridden by each recipe if required
+IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
+BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
+IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
+IMAGE_NAME_SUFFIX ?= ""
+
+do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
+
+do_image(){
+    install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin
+    install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
+}
+
+do_image_complete(){
+    :
+}
+
+python do_rootfs(){
+    from oe.utils import execute_pre_post_process
+    from pathlib import Path
+
+    # Write empty manifest file to satisfy test infrastructure
+    deploy_dir = d.getVar('IMGDEPLOYDIR')
+    link_name = d.getVar('IMAGE_LINK_NAME')
+    manifest_name = d.getVar('IMAGE_MANIFEST')
+
+    Path(manifest_name).touch()
+    if os.path.exists(manifest_name) and link_name:
+        manifest_link = deploy_dir + "/" + link_name + ".manifest"
+        if manifest_link != manifest_name:
+            if os.path.lexists(manifest_link):
+                os.remove(manifest_link)
+            os.symlink(os.path.basename(manifest_name), manifest_link)
+    # A lot of postprocess commands assume the existence of rootfs/etc
+    sysconfdir = d.getVar("IMAGE_ROOTFS") + d.getVar('sysconfdir')
+    bb.utils.mkdirhier(sysconfdir)
+
+    execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
+}
+
+
+# Ensure binaries, manifest and qemubootconf are populated in DEPLOY_DIR_IMAGE
+do_image_complete[dirs] = "${TOPDIR}"
+SSTATETASKS += "do_image_complete"
+SSTATE_SKIP_CREATION:task-image-complete = '1'
+do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
+do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
+do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
+addtask do_image_complete after do_image before do_build
+
+python do_image_complete_setscene () {
+    sstate_setscene(d)
+}
+addtask do_image_complete_setscene
+
+# QEMU generic Baremetal/RTOS parameters
+QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
+QB_MEM ?= "-m 256"
+QB_DEFAULT_FSTYPE ?= "bin"
+QB_DTB ?= ""
+QB_OPT_APPEND:append = " -nographic"
+
+# RISC-V tunes set the BIOS; unset it and instruct QEMU to
+# ignore the BIOS and boot from -kernel
+QB_DEFAULT_BIOS:qemuriscv64 = ""
+QB_DEFAULT_BIOS:qemuriscv32 = ""
+QB_OPT_APPEND:append:qemuriscv64 = " -bios none"
+QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
+
+
+# Use the medium-any code model for the RISC-V 64 bit implementation,
+# since medlow can only access addresses below 0x80000000 and RAM
+# starts at 0x80000000 on RISC-V 64
+# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
+CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
+
+
+# This next part is necessary to trick the build system into thinking
+# it's building an image recipe so it generates the qemuboot.conf
+addtask do_rootfs before do_image after do_install
+addtask do_image after do_rootfs before do_image_complete
+addtask do_image_complete after do_image before do_build
+inherit qemuboot
+
+# Based on image.bbclass to make sure we build qemu
+python(){
+    # do_addto_recipe_sysroot doesn't exist for all recipes, but we need it to have
+    # /usr/bin on recipe-sysroot (qemu) populated
+    # The do_addto_recipe_sysroot dependency is coming from EXTRA_IMAGEDEPENDS now,
+    # we just need to add the logic to add its dependency to do_image.
+    def extraimage_getdepends(task):
+        deps = ""
+        for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
+            # Make sure we only add it for qemu
+            if 'qemu' in dep:
+                if ":" in dep:
+                    deps += " %s " % (dep)
+                else:
+                    deps += " %s:%s" % (dep, task)
+        return deps
+    d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
}
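
A hedged sketch of an application recipe built on this class (the names and QEMU values are illustrative; see meta-skeleton for the real example the class refers to):

    inherit baremetal-image

    # Binaries expected under ${D}${base_libdir}/firmware by do_image
    BAREMETAL_BINNAME = "my-rtos-app_${MACHINE}"
    IMAGE_LINK_NAME = "my-rtos-app-image-${MACHINE}"

    # QEMU wiring consumed by runqemu through the generated qemuboot.conf
    QB_MACHINE = "-machine virt"
    QB_MEM = "-m 128"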
diff --git a/meta/classes-recipe/bash-completion.bbclass b/meta/classes-recipe/bash-completion.bbclass
new file mode 100644
index 0000000000..b656e76c09
--- /dev/null
+++ b/meta/classes-recipe/bash-completion.bbclass
@@ -0,0 +1,13 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+DEPENDS:append:class-target = " bash-completion"
+
+PACKAGES += "${PN}-bash-completion"
+
+FILES:${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
+
+RDEPENDS:${PN}-bash-completion = "bash-completion"
diff --git a/meta/classes-recipe/bin_package.bbclass b/meta/classes-recipe/bin_package.bbclass
new file mode 100644
index 0000000000..3a1befc29c
--- /dev/null
+++ b/meta/classes-recipe/bin_package.bbclass
@@ -0,0 +1,42 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Common variable and task for the binary package recipe.
+# Basic principle:
+# * The files have been unpacked to ${S} by base.bbclass
+# * Skip do_configure and do_compile
+# * Use do_install to install the files to ${D}
+#
+# Note:
+# The "subdir" parameter in the SRC_URI is useful when the input package
+# is rpm, ipk, deb and so on, for example:
+#
+# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
+#
+# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
+# they would be in ${WORKDIR}.
+#
+
+# Skip the unwanted steps
+do_configure[noexec] = "1"
+do_compile[noexec] = "1"
+
+# Install the files to ${D}
+bin_package_do_install () {
+	# Do it carefully
+	[ -d "${S}" ] || exit 1
+	if [ -z "$(ls -A ${S})" ]; then
+		bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
+	fi
+	cd ${S}
+	install -d ${D}${base_prefix}
+	tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
+		| tar --no-same-owner -xpf - -C ${D}${base_prefix}
+}
+
+FILES:${PN} = "/"
+
+EXPORT_FUNCTIONS do_install
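
Putting the note above together, a sketch of a prebuilt-binary recipe (the URL is the illustrative one from the comment; the license is a placeholder):

    SUMMARY = "Prebuilt vendor package"
    LICENSE = "CLOSED"

    # subdir= makes the unpacked payload land in ${WORKDIR}/foo-1.0, i.e. ${S}
    SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
    S = "${WORKDIR}/foo-1.0"

    inherit bin_package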
diff --git a/meta/classes-recipe/binconfig-disabled.bbclass b/meta/classes-recipe/binconfig-disabled.bbclass
new file mode 100644
index 0000000000..cbe2078e0f
--- /dev/null
+++ b/meta/classes-recipe/binconfig-disabled.bbclass
@@ -0,0 +1,36 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+#
+# Class to disable binconfig files instead of installing them
+#
+
+# The list of scripts which should be disabled.
+BINCONFIG ?= ""
+
+FILES:${PN}-dev += "${bindir}/*-config"
+
+do_install:append () {
+	for x in ${BINCONFIG}; do
+		# Make the disabled script emit invalid parameters for those configure
+		# scripts which call it without checking the return code.
+		echo "#!/bin/sh" > ${D}$x
+		echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
+		echo "echo '--should-not-have-used-$x'" >> ${D}$x
+		echo "exit 1" >> ${D}$x
+		chmod +x ${D}$x
+	done
+}
+
+SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
+
+binconfig_disabled_sysroot_preprocess () {
+	for x in ${BINCONFIG}; do
+		configname=`basename $x`
+		install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+		install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
+	done
+}
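
A usage sketch (the script name is illustrative; oe-core's curl recipe uses the same pattern for curl-config):

    inherit binconfig-disabled

    # Script(s) to replace with the failing stub generated above
    BINCONFIG = "${bindir}/foo-config"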
diff --git a/meta/classes-recipe/binconfig.bbclass b/meta/classes-recipe/binconfig.bbclass
new file mode 100644
index 0000000000..427dba7f1f
--- /dev/null
+++ b/meta/classes-recipe/binconfig.bbclass
@@ -0,0 +1,60 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+FILES:${PN}-dev += "${bindir}/*-config"
+
+# The namespaces can clash here, hence the two-step replace
+def get_binconfig_mangle(d):
+    s = "-e ''"
+    if not bb.data.inherits_class('native', d):
+        optional_quote = r"\(\"\?\)"
+        s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
+        s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
+        s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
+        s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
+        s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
+        s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
+        s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
+        s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
+        s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
+        s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
+        s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
+        s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
+        s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
+        s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
+        s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
+        s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
+    if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
+        s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")
+
+    return s
+
+BINCONFIG_GLOB ?= "*-config"
+
+PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
+
+binconfig_package_preprocess () {
+	for config in `find ${PKGD} -type f -name '${BINCONFIG_GLOB}'`; do
+		sed -i \
+		    -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
+		    -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
+		    -e 's:${STAGING_INCDIR}:${includedir}:g;' \
+		    -e 's:${STAGING_DATADIR}:${datadir}:' \
+		    -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
+		    $config
+	done
+}
+
+SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
+
+binconfig_sysroot_preprocess () {
+	for config in `find ${S} -type f -name '${BINCONFIG_GLOB}'` `find ${B} -type f -name '${BINCONFIG_GLOB}'`; do
+		configname=`basename $config`
+		install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+		sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
+		chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
+	done
+}
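
To illustrate the mangle with a hypothetical foo-config script: the sysroot copy created by binconfig_sysroot_preprocess has target paths rewritten to staging paths so other recipes can run it at build time, while binconfig_package_preprocess maps any staging paths in the packaged copy back to target paths:

    # foo-config as built (target path):
    libdir="/usr/lib"

    # sysroot copy after get_binconfig_mangle ("/path/to" stands for the
    # actual, expanded recipe sysroot location):
    libdir="/path/to/recipe-sysroot/usr/lib"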
diff --git a/meta/classes-recipe/cargo.bbclass b/meta/classes-recipe/cargo.bbclass
new file mode 100644
index 0000000000..d1e83518b5
--- /dev/null
+++ b/meta/classes-recipe/cargo.bbclass
@@ -0,0 +1,97 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+##
+## Purpose:
+## This class is used by any recipes that are built using
+## Cargo.
+
+inherit cargo_common
+inherit rust-target-config
+
+# the binary we will use
+CARGO = "cargo"
+
+# We need cargo to compile for the target
+BASEDEPENDS:append = " cargo-native"
+
+# Ensure we get the right rust variant
+DEPENDS:append:class-target = " rust-native ${RUSTLIB_DEP}"
+DEPENDS:append:class-nativesdk = " rust-native ${RUSTLIB_DEP}"
+DEPENDS:append:class-native = " rust-native"
+
+# Enable build separation
+B = "${WORKDIR}/build"
+
+# In case something fails in the build process, give a bit more feedback on
+# where the issue occurred
+export RUST_BACKTRACE = "1"
+
+# The directory of the Cargo.toml relative to the root directory, per default
+# assume there's a Cargo.toml directly in the root directory
+CARGO_SRC_DIR ??= ""
+
+# The actual path to the Cargo.toml
+MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
+
+RUSTFLAGS ??= ""
+BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_BUILD_FLAGS = "-v --target ${RUST_HOST_SYS} ${BUILD_MODE} --manifest-path=${MANIFEST_PATH}"
+
+# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
+# change if CARGO_BUILD_FLAGS changes.
+BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_TARGET_SUBDIR="${RUST_HOST_SYS}/${BUILD_DIR}"
+oe_cargo_build () {
+	export RUSTFLAGS="${RUSTFLAGS}"
+	bbnote "Using rust targets from ${RUST_TARGET_PATH}"
+	bbnote "cargo = $(which ${CARGO})"
+	bbnote "rustc = $(which ${RUSTC})"
+	bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@"
+	"${CARGO}" build ${CARGO_BUILD_FLAGS} "$@"
+}
+
+do_compile[progress] = "outof:\s+(\d+)/(\d+)"
+cargo_do_compile () {
+	oe_cargo_fix_env
+	oe_cargo_build
+}
+
+cargo_do_install () {
+	local have_installed=false
+	for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
+		case $tgt in
+		*.so|*.rlib)
+			install -d "${D}${rustlibdir}"
+			install -m755 "$tgt" "${D}${rustlibdir}"
+			have_installed=true
+			;;
+		*examples)
+			if [ -d "$tgt" ]; then
+				for example in "$tgt/"*; do
+					if [ -f "$example" ] && [ -x "$example" ]; then
+						install -d "${D}${bindir}"
+						install -m755 "$example" "${D}${bindir}"
+						have_installed=true
+					fi
+				done
+			fi
+			;;
+		*)
+			if [ -f "$tgt" ] && [ -x "$tgt" ]; then
+				install -d "${D}${bindir}"
+				install -m755 "$tgt" "${D}${bindir}"
+				have_installed=true
+			fi
+			;;
+		esac
+	done
+	if ! $have_installed; then
+		die "Did not find anything to install"
+	fi
+}
+
+EXPORT_FUNCTIONS do_compile do_install
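
A usage sketch for a Cargo-based recipe (the repository URL and subdirectory are illustrative, not part of this commit):

    inherit cargo

    SRC_URI = "git://github.com/example/hello-rs.git;protocol=https;branch=main"
    S = "${WORKDIR}/git"

    # In this sketch Cargo.toml sits in a subdirectory of the checkout;
    # MANIFEST_PATH is derived from CARGO_SRC_DIR above
    CARGO_SRC_DIR = "cli"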
diff --git a/meta/classes-recipe/cargo_common.bbclass b/meta/classes-recipe/cargo_common.bbclass
new file mode 100644
index 0000000000..eec7710a4c
--- /dev/null
+++ b/meta/classes-recipe/cargo_common.bbclass
@@ -0,0 +1,139 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+##
+## Purpose:
+## This class is to support building with cargo. It
+## must be different from cargo.bbclass because Rust
+## now builds with Cargo but cannot use cargo.bbclass
+## due to dependencies and assumptions in cargo.bbclass
+## that Rust & Cargo are already installed. So this
+## is used by cargo.bbclass and Rust
+##
+
+# add crate fetch support
+inherit rust-common
+
+# Where we download our registry and dependencies to
+export CARGO_HOME = "${WORKDIR}/cargo_home"
+
+# The pkg-config-rs library used by cargo build scripts disables itself when
+# cross compiling unless this is defined. We set up pkg-config appropriately
+# for cross compilation, so tell it we know better than it.
+export PKG_CONFIG_ALLOW_CROSS = "1"
+
+# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
+# for example the rust compiler itself, come with their own vendored sources.
+# Specifying two [source.crates-io] will not work.
+CARGO_DISABLE_BITBAKE_VENDORING ?= "0"
+
+# Used by libstd-rs to point to the vendor dir included in rustc src
+CARGO_VENDORING_DIRECTORY ?= "${CARGO_HOME}/bitbake"
+
+CARGO_RUST_TARGET_CCLD ?= "${RUST_TARGET_CCLD}"
+cargo_common_do_configure () {
+	mkdir -p ${CARGO_HOME}/bitbake
+
+	cat <<- EOF > ${CARGO_HOME}/config
+	# EXTRA_OECARGO_PATHS
+	paths = [
+	$(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
+	]
+	EOF
+
+	cat <<- EOF >> ${CARGO_HOME}/config
+
+	# Local mirror vendored by bitbake
+	[source.bitbake]
+	directory = "${CARGO_VENDORING_DIRECTORY}"
+	EOF
+
+	if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
+		cat <<- EOF >> ${CARGO_HOME}/config
+
+		[source.crates-io]
+		replace-with = "bitbake"
+		local-registry = "/nonexistant"
+		EOF
+	fi
+
+	cat <<- EOF >> ${CARGO_HOME}/config
+
+	[http]
+	# Multiplexing can't be enabled because http2 can't be enabled
+	# in curl-native without dependency loops
+	multiplexing = false
+
+	# Ignore the hard coded and incorrect path to certificates
+	cainfo = "${STAGING_ETCDIR_NATIVE}/ssl/certs/ca-certificates.crt"
+
+	EOF
+
+	cat <<- EOF >> ${CARGO_HOME}/config
+
+	# HOST_SYS
+	[target.${RUST_HOST_SYS}]
+	linker = "${CARGO_RUST_TARGET_CCLD}"
+	EOF
+
+	if [ "${RUST_HOST_SYS}" != "${RUST_BUILD_SYS}" ]; then
+		cat <<- EOF >> ${CARGO_HOME}/config
+
+		# BUILD_SYS
+		[target.${RUST_BUILD_SYS}]
+		linker = "${RUST_BUILD_CCLD}"
+		EOF
+	fi
+
+	if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}" ]; then
+		cat <<- EOF >> ${CARGO_HOME}/config
+
+		# TARGET_SYS
+		[target.${RUST_TARGET_SYS}]
+		linker = "${RUST_TARGET_CCLD}"
+		EOF
+	fi
+
+	# Put build output in build directory preferred by bitbake instead of
+	# inside source directory unless they are the same
+	if [ "${B}" != "${S}" ]; then
+		cat <<- EOF >> ${CARGO_HOME}/config
+
+		[build]
+		# Use out of tree build destination to avoid polluting the source tree
+		target-dir = "${B}/target"
+		EOF
+	fi
+
+	cat <<- EOF >> ${CARGO_HOME}/config
+
+	[term]
+	progress.when = 'always'
+	progress.width = 80
+	EOF
+}
+
+oe_cargo_fix_env () {
+	export CC="${RUST_TARGET_CC}"
+	export CXX="${RUST_TARGET_CXX}"
+	export CFLAGS="${CFLAGS}"
+	export CXXFLAGS="${CXXFLAGS}"
+	export AR="${AR}"
+	export TARGET_CC="${RUST_TARGET_CC}"
+	export TARGET_CXX="${RUST_TARGET_CXX}"
+	export TARGET_CFLAGS="${CFLAGS}"
+	export TARGET_CXXFLAGS="${CXXFLAGS}"
+	export TARGET_AR="${AR}"
+	export HOST_CC="${RUST_BUILD_CC}"
+	export HOST_CXX="${RUST_BUILD_CXX}"
+	export HOST_CFLAGS="${BUILD_CFLAGS}"
+	export HOST_CXXFLAGS="${BUILD_CXXFLAGS}"
+	export HOST_AR="${BUILD_AR}"
+}
+
+EXTRA_OECARGO_PATHS ??= ""
+
+EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes-recipe/cmake.bbclass b/meta/classes-recipe/cmake.bbclass
new file mode 100644
index 0000000000..554b948c32
--- /dev/null
+++ b/meta/classes-recipe/cmake.bbclass
@@ -0,0 +1,223 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Path to the CMake file to process.
+OECMAKE_SOURCEPATH ??= "${S}"
+
+DEPENDS:prepend = "cmake-native "
+B = "${WORKDIR}/build"
+
+# What CMake generator to use.
+# The supported options are "Unix Makefiles" or "Ninja".
+OECMAKE_GENERATOR ?= "Ninja"
+
+python() {
+    generator = d.getVar("OECMAKE_GENERATOR")
+    if "Unix Makefiles" in generator:
+        args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
+        d.setVar("OECMAKE_GENERATOR_ARGS", args)
+        d.setVarFlag("do_compile", "progress", "percent")
+    elif "Ninja" in generator:
+        args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
+        d.appendVar("DEPENDS", " ninja-native")
+        d.setVar("OECMAKE_GENERATOR_ARGS", args)
+        d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
+    else:
+        bb.fatal("Unknown CMake Generator %s" % generator)
+}
+OECMAKE_AR ?= "${AR}"
+
+# Compiler flags
+OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
+OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
+OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
+OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
+OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
+OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
+
+def oecmake_map_compiler(compiler, d):
+    args = d.getVar(compiler).split()
+    if args[0] == "ccache":
+        return args[1], args[0]
+    return args[0], ""
+
+# C/C++ Compiler (without cpu arch/tune arguments)
+OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}"
+OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
+OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
+OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
+
+# clear compiler vars for allarch to avoid sig hash difference
+OECMAKE_C_COMPILER_allarch = ""
+OECMAKE_C_COMPILER_LAUNCHER_allarch = ""
+OECMAKE_CXX_COMPILER_allarch = ""
+OECMAKE_CXX_COMPILER_LAUNCHER_allarch = ""
+
+OECMAKE_RPATH ?= ""
+OECMAKE_PERLNATIVE_DIR ??= ""
+OECMAKE_EXTRA_ROOT_PATH ?= ""
+
+OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
+OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM:class-native = "BOTH"
+
+EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
+
+export CMAKE_BUILD_PARALLEL_LEVEL
+CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}"
+CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}"
+
+OECMAKE_TARGET_COMPILE ?= "all"
+OECMAKE_TARGET_INSTALL ?= "install"
+
+def map_host_os_to_system_name(host_os):
+    if host_os.startswith('mingw'):
+        return 'Windows'
+    if host_os.startswith('linux'):
+        return 'Linux'
+    return host_os
+
+# CMake expects target architectures in the format of uname(2),
+# which do not always match TARGET_ARCH, so all the necessary
+# conversions should happen here.
+def map_host_arch_to_uname_arch(host_arch):
+    if host_arch == "powerpc":
+        return "ppc"
+    if host_arch == "powerpc64le":
+        return "ppc64le"
+    if host_arch == "powerpc64":
+        return "ppc64"
+    return host_arch
+
+cmake_do_generate_toolchain_file() {
+	if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
+		cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
+	fi
+	cat > ${WORKDIR}/toolchain.cmake <<EOF
+# CMake system name must be something like "Linux".
+# This is important for cross-compiling.
+$cmake_crosscompiling
+set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
+set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
+set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
+set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
+set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
+set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
+find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED )
+
+set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
+set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
+set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
+set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
+set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
+set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
+set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
+set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
+
+# only search in the paths provided so cmake doesn't pick
+# up libraries and tools from the native build machine
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
+set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
+set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
+set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
+set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
+set( CMAKE_PROGRAM_PATH "/" )
+
+# Use qt.conf settings
+set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
+
+# We need to set the rpath to the correct directory as cmake does not provide any
+# directory as rpath by default
+set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
+
+# Use RPATHs relative to build directory for reproducibility
+set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
+
+# Use our cmake modules
+list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
+
+# add for non /usr/lib libdir, e.g. /usr/lib64
+set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
+
+# add include dir to implicit includes in case it differs from /usr/include
+list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
+list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
+
+EOF
+}
+
+addtask generate_toolchain_file after do_patch before do_configure
+
+CONFIGURE_FILES = "CMakeLists.txt"
+
+do_configure[cleandirs] = "${@d.getVar('B') if d.getVar('S') != d.getVar('B') else ''}"
+
+cmake_do_configure() {
+	if [ "${OECMAKE_BUILDPATH}" ]; then
+		bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
+	fi
+
+	if [ "${S}" = "${B}" ]; then
+		find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
+	fi
+
+	# Just like autotools cmake can use a site file to cache result that need generated binaries to run
+	if [ -e ${WORKDIR}/site-file.cmake ] ; then
+		oecmake_sitefile="-C ${WORKDIR}/site-file.cmake"
+	else
+		oecmake_sitefile=
+	fi
+
+	cmake \
+	  ${OECMAKE_GENERATOR_ARGS} \
+	  $oecmake_sitefile \
+	  ${OECMAKE_SOURCEPATH} \
+	  -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
+	  -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
+	  -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
+	  -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
+	  -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
+	  -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \
+	  -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
+	  -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
+	  -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
+	  -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
+	  -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
+	  -DPython_EXECUTABLE:PATH=${PYTHON} \
+	  -DPython3_EXECUTABLE:PATH=${PYTHON} \
+	  -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
+	  -DCMAKE_INSTALL_SO_NO_EXE=0 \
+	  -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
+	  -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
+	  -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
+	  -DFETCHCONTENT_FULLY_DISCONNECTED=ON \
+	  ${EXTRA_OECMAKE} \
+	  -Wno-dev
+}
+
+# To disable verbose cmake logs for a given recipe or globally, add the
+# following to the config metadata (e.g. local.conf):
+#
+# CMAKE_VERBOSE = ""
+#
+
+CMAKE_VERBOSE ??= "VERBOSE=1"
+
+# Then run do_compile again
+cmake_runcmake_build() {
+	bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+	eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+}
+
+cmake_do_compile() {
+	cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
+}
+
+cmake_do_install() {
+	DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
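
A usage sketch (the cache option name is illustrative): a recipe inherits cmake and passes project-specific cache entries through EXTRA_OECMAKE, optionally switching generators:

    inherit cmake

    # Forwarded verbatim to the cmake invocation in cmake_do_configure
    EXTRA_OECMAKE = "-DENABLE_TESTS=OFF"

    # Fall back to make for projects that do not build with ninja
    OECMAKE_GENERATOR = "Unix Makefiles"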
diff --git a/meta/classes-recipe/cml1.bbclass b/meta/classes-recipe/cml1.bbclass
new file mode 100644
index 0000000000..b79091383d
--- /dev/null
+++ b/meta/classes-recipe/cml1.bbclass
@@ -0,0 +1,107 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# returns all the elements from the src uri that are .cfg files
+def find_cfgs(d):
+    sources = src_patches(d, True)
+    sources_list = []
+    for s in sources:
+        if s.endswith('.cfg'):
+            sources_list.append(s)
+
+    return sources_list
+
+cml1_do_configure() {
+	set -e
+	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+	yes '' | oe_runmake oldconfig
+}
+
+EXPORT_FUNCTIONS do_configure
+addtask configure after do_unpack do_patch before do_compile
+
+inherit terminal
+
+OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
+HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
+HOSTLDFLAGS = "${BUILD_LDFLAGS}"
+CROSS_CURSES_LIB = "-lncurses -ltinfo"
+CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
+TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
+
+KCONFIG_CONFIG_COMMAND ??= "menuconfig"
+KCONFIG_CONFIG_ROOTDIR ??= "${B}"
+python do_menuconfig() {
+    import shutil
+
+    config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
+    configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+
+    try:
+        mtime = os.path.getmtime(config)
+        shutil.copy(config, configorig)
+    except OSError:
+        mtime = 0
+
+    # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+    d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
+    d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
+    d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
+    d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
+    # ensure that environment variables are overwritten with this task's 'd' values
+    d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
+
+    oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+                d.getVar('PN') + ' Configuration', d)
+
+    # FIXME this check can be removed when the minimum bitbake version has been bumped
+    if hasattr(bb.build, 'write_taint'):
+        try:
+            newmtime = os.path.getmtime(config)
+        except OSError:
+            newmtime = 0
+
+        if newmtime > mtime:
+            bb.note("Configuration changed, recompile will be forced")
+            bb.build.write_taint('do_compile', d)
+}
+do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
+do_menuconfig[nostamp] = "1"
+do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
+addtask menuconfig after do_configure
+
+python do_diffconfig() {
+    import shutil
+    import subprocess
+
+    workdir = d.getVar('WORKDIR')
+    fragment = workdir + '/fragment.cfg'
+    configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+    config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
+
+    try:
+        md5newconfig = bb.utils.md5_file(configorig)
+        md5config = bb.utils.md5_file(config)
+        isdiff = md5newconfig != md5config
+    except IOError as e:
+        bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e)
+
+    if isdiff:
+        statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
+        subprocess.call(statement, shell=True)
+        # No need to check the exit code as we know it's going to be
+        # non-zero, but that's what we expect.
+        shutil.copy(configorig, config)
+
+        bb.plain("Config fragment has been dumped into:\n %s" % fragment)
+    else:
+        if os.path.exists(fragment):
+            os.unlink(fragment)
+}
+
+do_diffconfig[nostamp] = "1"
+do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
+addtask diffconfig
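
A typical workflow against a kconfig-based recipe such as the kernel (a sketch; the recipe name depends on your configuration):

    $ bitbake -c menuconfig virtual/kernel   # edit .config in a spawned terminal
    $ bitbake -c diffconfig virtual/kernel   # write the delta to ${WORKDIR}/fragment.cfg

A resulting fragment can then be carried in a recipe via SRC_URI += "file://fragment.cfg", which find_cfgs() above picks out of the patch list.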
diff --git a/meta/classes-recipe/compress_doc.bbclass b/meta/classes-recipe/compress_doc.bbclass
new file mode 100644
index 0000000000..d603caf858
--- /dev/null
+++ b/meta/classes-recipe/compress_doc.bbclass
@@ -0,0 +1,269 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Compress man pages in ${mandir} and info pages in ${infodir}
8#
9# 1. The doc will be compressed to gz format by default.
10#
11# 2. It will automatically correct the compressed doc which is not
12# in ${DOC_COMPRESS} but in ${DOC_COMPRESS_LIST} to the format
13# of ${DOC_COMPRESS} policy
14#
15# 3. It is easy to add a new type compression by editing
16# local.conf, such as:
17# DOC_COMPRESS_LIST:append = ' abc'
18# DOC_COMPRESS = 'abc'
19# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
20# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
21
22# All supported compression policy
23DOC_COMPRESS_LIST ?= "gz xz bz2"
24
25# Compression policy, must be one of ${DOC_COMPRESS_LIST}
26DOC_COMPRESS ?= "gz"
27
28# Compression shell command
29DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
30DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
31DOC_COMPRESS_CMD[xz] ?= "xz -v"
32
33# Decompression shell command
34DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
35DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
36DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
37
38PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
39python package_do_compress_doc() {
40 compress_mode = d.getVar('DOC_COMPRESS')
41 compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split()
42 if compress_mode not in compress_list:
43 bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
44
45 dvar = d.getVar('PKGD')
46 compress_cmds = {}
47 decompress_cmds = {}
48 for mode in compress_list:
49 compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
50 decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
51
52 mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir"))
53 if os.path.exists(mandir):
54 # Decompress doc files which format is not compress_mode
55 decompress_doc(mandir, compress_mode, decompress_cmds)
56 compress_doc(mandir, compress_mode, compress_cmds)
57
58 infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir"))
59 if os.path.exists(infodir):
60 # Decompress doc files which format is not compress_mode
61 decompress_doc(infodir, compress_mode, decompress_cmds)
62 compress_doc(infodir, compress_mode, compress_cmds)
63}
64
65def _get_compress_format(file, compress_format_list):
66 for compress_format in compress_format_list:
67 compress_suffix = '.' + compress_format
68 if file.endswith(compress_suffix):
69 return compress_format
70
71 return ''
72
73# Collect hardlinks to dict, each element in dict lists hardlinks
74# which points to the same doc file.
75# {hardlink10: [hardlink11, hardlink12],,,}
76# The hardlink10, hardlink11 and hardlink12 are the same file.
77def _collect_hardlink(hardlink_dict, file):
78 for hardlink in hardlink_dict:
79 # Add to the existed hardlink
80 if os.path.samefile(hardlink, file):
81 hardlink_dict[hardlink].append(file)
82 return hardlink_dict
83
84 hardlink_dict[file] = []
85 return hardlink_dict
86
87def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
88 import subprocess
89 for target in hardlink_dict:
90 if decompress:
91 compress_format = _get_compress_format(target, shell_cmds.keys())
92 cmd = "%s -f %s" % (shell_cmds[compress_format], target)
93 bb.note('decompress hardlink %s' % target)
94 else:
95 cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
96 bb.note('compress hardlink %s' % target)
97 (retval, output) = subprocess.getstatusoutput(cmd)
98 if retval:
99 bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
100 return
101
102 for hardlink_dup in hardlink_dict[target]:
103 if decompress:
104 # Remove compress suffix
105 compress_suffix = '.' + compress_format
106 new_hardlink = hardlink_dup[:-len(compress_suffix)]
107 new_target = target[:-len(compress_suffix)]
108 else:
109 # Append compress suffix
110 compress_suffix = '.' + compress_mode
111 new_hardlink = hardlink_dup + compress_suffix
112 new_target = target + compress_suffix
113
114 bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
115 if not os.path.exists(new_hardlink):
116 os.link(new_target, new_hardlink)
117 if os.path.exists(hardlink_dup):
118 os.unlink(hardlink_dup)
119
120def _process_symlink(file, compress_format, decompress=False):
121 compress_suffix = '.' + compress_format
122 if decompress:
123 # Remove compress suffix
124 new_linkname = file[:-len(compress_suffix)]
125 new_source = os.readlink(file)[:-len(compress_suffix)]
126 else:
127 # Append compress suffix
128 new_linkname = file + compress_suffix
129 new_source = os.readlink(file) + compress_suffix
130
131 bb.note('symlink %s-->%s' % (new_linkname, new_source))
132 if not os.path.exists(new_linkname):
133 os.symlink(new_source, new_linkname)
134
135 os.unlink(file)
136
137def _is_info(file):
138 flags = '.info .info-'.split()
139 for flag in flags:
140 if flag in os.path.basename(file):
141 return True
142
143 return False
144
145def _is_man(file):
146 import re
147
148 # It refers MANSECT-var in man(1.6g)'s man.config
149 # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
150 # Not start with '.', and contain the above colon-seperate element
151 p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
152 if p.search(file):
153 return True
154
155 return False
156
157def _is_compress_doc(file, compress_format_list):
158 compress_format = _get_compress_format(file, compress_format_list)
159 compress_suffix = '.' + compress_format
160 if file.endswith(compress_suffix):
161 # Remove the compress suffix
162 uncompress_file = file[:-len(compress_suffix)]
163 if _is_info(uncompress_file) or _is_man(uncompress_file):
164 return True, compress_format
165
166 return False, ''
167
168def compress_doc(topdir, compress_mode, compress_cmds):
169 import subprocess
170 hardlink_dict = {}
171 for root, dirs, files in os.walk(topdir):
172 for f in files:
173 file = os.path.join(root, f)
174 if os.path.isdir(file):
175 continue
176
177 if _is_info(file) or _is_man(file):
178 # Symlink
179 if os.path.islink(file):
180 _process_symlink(file, compress_mode)
181 # Hardlink
182 elif os.lstat(file).st_nlink > 1:
183 _collect_hardlink(hardlink_dict, file)
184 # Normal file
185 elif os.path.isfile(file):
186 cmd = "%s %s" % (compress_cmds[compress_mode], file)
187 (retval, output) = subprocess.getstatusoutput(cmd)
188 if retval:
189 bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
190 continue
191 bb.note('compress file %s' % file)
192
193 _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
194
195# Decompress doc files whose format is not compress_mode
196def decompress_doc(topdir, compress_mode, decompress_cmds):
197 import subprocess
198 hardlink_dict = {}
199 decompress = True
200 for root, dirs, files in os.walk(topdir):
201 for f in files:
202 file = os.path.join(root, f)
203 if os.path.isdir(file):
204 continue
205
206 res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
207 # Decompress files whose format is not compress_mode
208 if res and compress_mode!=compress_format:
209 # Symlink
210 if os.path.islink(file):
211 _process_symlink(file, compress_format, decompress)
212 # Hardlink
213 elif os.lstat(file).st_nlink > 1:
214 _collect_hardlink(hardlink_dict, file)
215 # Normal file
216 elif os.path.isfile(file):
217 cmd = "%s %s" % (decompress_cmds[compress_format], file)
218 (retval, output) = subprocess.getstatusoutput(cmd)
219 if retval:
220 bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
221 continue
222 bb.note('decompress file %s' % file)
223
224 _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
225
226python compress_doc_updatealternatives () {
227 if not bb.data.inherits_class('update-alternatives', d):
228 return
229
230 mandir = d.getVar("mandir")
231 infodir = d.getVar("infodir")
232 compress_mode = d.getVar('DOC_COMPRESS')
233 for pkg in (d.getVar('PACKAGES') or "").split():
234 old_names = (d.getVar('ALTERNATIVE:%s' % pkg) or "").split()
235 new_names = []
236 for old_name in old_names:
237 old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
238 old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \
239 d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \
240 d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
241 d.getVar('ALTERNATIVE_TARGET') or \
242 old_link
243 # Sometimes old_target is specified relative to the link name.
244 old_target = os.path.join(os.path.dirname(old_link), old_target)
245
246 # Update the alternatives entries that point into the compressed doc dirs
247 if mandir in old_target or infodir in old_target:
248 new_name = old_name + '.' + compress_mode
249 new_link = old_link + '.' + compress_mode
250 new_target = old_target + '.' + compress_mode
251 d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
252 d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
253 if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name):
254 d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
255 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
256 elif d.getVarFlag('ALTERNATIVE_TARGET', old_name):
257 d.delVarFlag('ALTERNATIVE_TARGET', old_name)
258 d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
259 elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg):
260 d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
261 elif d.getVar('ALTERNATIVE_TARGET'):
262 d.setVar('ALTERNATIVE_TARGET', new_target)
263
264 new_names.append(new_name)
265
266 if new_names:
267 d.setVar('ALTERNATIVE:%s' % pkg, ' '.join(new_names))
268}
269
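In local.conf or a distro config, the class is typically driven like this (a sketch; DOC_COMPRESS and the DOC_COMPRESS_CMD/DOC_DECOMPRESS_CMD varflags are the names read by the code above, while the xz command values are assumptions):

    INHERIT += "compress_doc"
    DOC_COMPRESS = "xz"
    # per-format commands are looked up as varflags, cf. getVarFlag() above
    DOC_COMPRESS_CMD[xz] = "xz -f"
    DOC_DECOMPRESS_CMD[xz] = "unxz -f"
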
diff --git a/meta/classes-recipe/core-image.bbclass b/meta/classes-recipe/core-image.bbclass
new file mode 100644
index 0000000000..7ef7d07390
--- /dev/null
+++ b/meta/classes-recipe/core-image.bbclass
@@ -0,0 +1,81 @@
1# Common code for generating core reference images
2#
3# Copyright (C) 2007-2011 Linux Foundation
4#
5# SPDX-License-Identifier: MIT
6
7# IMAGE_FEATURES controls the content of the core reference images
8#
9# By default we install packagegroup-core-boot and packagegroup-base-extended packages;
10# this gives us a working (console-only) rootfs.
11#
12# Available IMAGE_FEATURES:
13#
14# - weston - Weston Wayland compositor
15# - x11 - X server
16# - x11-base - X server with minimal environment
17# - x11-sato - OpenedHand Sato environment
18# - tools-debug - debugging tools
19# - eclipse-debug - Eclipse remote debugging support
20# - tools-profile - profiling tools
21# - tools-testapps - tools for performing device tests
22# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
23# - nfs-server - NFS server
24# - nfs-client - NFS client
25# - ssh-server-dropbear - SSH server (dropbear)
26# - ssh-server-openssh - SSH server (openssh)
27# - hwcodecs - Install hardware acceleration codecs
28# - package-management - installs package management tools and preserves the package manager database
29# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
30# - empty-root-password
31# - allow-empty-password
32# - allow-root-login
33# - post-install-logging
34# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
35# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
36# - lic-pkgs - license packages for all installed packages in the rootfs, requires
37# LICENSE_CREATE_PACKAGE="1" to be set when building packages too
38# - doc-pkgs - documentation packages for all installed packages in the rootfs
39# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
40# - ptest-pkgs - ptest packages for all ptest-enabled recipes
41# - read-only-rootfs - tweaks an image to support read-only rootfs
42# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
43# - splash - bootup splash screen
44#
45FEATURE_PACKAGES_weston = "packagegroup-core-weston"
46FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
47FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
48FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
49FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
50FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
51FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
52FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
53FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
54FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
55FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client"
56FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
57FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
58FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
59
60
61# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
62# Including image feature foo would replace the image features bar1 and bar2
63IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
64
65# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
66# An error is raised if image feature foo is included together with bar1 (or bar2)
67
68MACHINE_HWCODECS ??= ""
69
70CORE_IMAGE_BASE_INSTALL = '\
71 packagegroup-core-boot \
72 packagegroup-base-extended \
73 \
74 ${CORE_IMAGE_EXTRA_INSTALL} \
75 '
76
77CORE_IMAGE_EXTRA_INSTALL ?= ""
78
79IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
80
81inherit image
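
A minimal image recipe built on this class could look as follows (recipe name and extra package are hypothetical):

    # my-console-image.bb
    SUMMARY = "Console-only image with SSH access and debug tools"
    IMAGE_FEATURES += "ssh-server-dropbear tools-debug"
    CORE_IMAGE_EXTRA_INSTALL += "htop"
    inherit core-image
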
diff --git a/meta/classes-recipe/cpan-base.bbclass b/meta/classes-recipe/cpan-base.bbclass
new file mode 100644
index 0000000000..1db0a4ded6
--- /dev/null
+++ b/meta/classes-recipe/cpan-base.bbclass
@@ -0,0 +1,33 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# cpan-base provides various Perl-related information needed for building
9# CPAN modules
10#
11FILES:${PN} += "${libdir}/perl5 ${datadir}/perl5"
12
13DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
14RDEPENDS:${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
15
16inherit perl-version
17
18def is_target(d):
19 if not bb.data.inherits_class('native', d):
20 return "yes"
21 return "no"
22
23PERLLIBDIRS = "${libdir}/perl5"
24PERLLIBDIRS:class-native = "${libdir}/perl5"
25
26def cpan_upstream_check_pattern(d):
27 for x in (d.getVar('SRC_URI') or '').split(' '):
28 if x.startswith("https://cpan.metacpan.org"):
29 _pattern = x.split('/')[-1].replace(d.getVar('PV'), r'(?P<pver>\d+.\d+)')
30 return _pattern
31 return ''
32
33UPSTREAM_CHECK_REGEX ?= "${@cpan_upstream_check_pattern(d)}"
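
As a worked example (recipe values hypothetical): with PV = "1.25" and the SRC_URI entry below, cpan_upstream_check_pattern() replaces the version in the basename with a capture group:

    SRC_URI = "https://cpan.metacpan.org/authors/id/X/XY/XYZ/Foo-Bar-${PV}.tar.gz"
    # resulting UPSTREAM_CHECK_REGEX: Foo-Bar-(?P<pver>\d+.\d+).tar.gz
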
diff --git a/meta/classes-recipe/cpan.bbclass b/meta/classes-recipe/cpan.bbclass
new file mode 100644
index 0000000000..bb76a5b326
--- /dev/null
+++ b/meta/classes-recipe/cpan.bbclass
@@ -0,0 +1,71 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This is for perl modules that use the old Makefile.PL build system
9#
10inherit cpan-base perlnative
11
12EXTRA_CPANFLAGS ?= ""
13EXTRA_PERLFLAGS ?= ""
14
15# Env var which tells perl if it should use host (no) or target (yes) settings
16export PERLCONFIGTARGET = "${@is_target(d)}"
17
18# Env var which tells perl where the perl include files are
19export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE"
20export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}"
21export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
22export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
23export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
24
25cpan_do_configure () {
26 yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS}
27
28 # Makefile.PLs can exit with success without generating a
29 # Makefile, e.g. in cases of missing configure time
30 # dependencies. This is considered a best practice by
31 # cpantesters.org. See:
32 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
33 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
34 [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
35
36 if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
37 . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh
38 # Use find since there can be a Makefile generated for each Makefile.PL
39 for f in `find -name Makefile.PL`; do
40 f2=`echo $f | sed -e 's/.PL//'`
41 test -f $f2 || continue
42 sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
43 -e 's/perl.real/perl/' \
44 -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
45 $f2
46 done
47 fi
48}
49
50do_configure:append:class-target() {
51 find . -name Makefile | xargs sed -E -i \
52 -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
53}
54
55do_configure:append:class-nativesdk() {
56 find . -name Makefile | xargs sed -E -i \
57 -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
58}
59
60cpan_do_compile () {
61 oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
62}
63
64cpan_do_install () {
65 oe_runmake DESTDIR="${D}" install_vendor
66 for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
67 sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
68 done
69}
70
71EXPORT_FUNCTIONS do_configure do_compile do_install
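
A sketch of a recipe using this class (module name, version and extra flags are hypothetical):

    # libfoo-bar-perl_1.25.bb
    SUMMARY = "Foo::Bar Perl module"
    SRC_URI = "https://cpan.metacpan.org/authors/id/X/XY/XYZ/Foo-Bar-${PV}.tar.gz"
    EXTRA_CPANFLAGS = "EXPATLIBPATH=${STAGING_LIBDIR}"
    inherit cpan
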
diff --git a/meta/classes-recipe/cpan_build.bbclass b/meta/classes-recipe/cpan_build.bbclass
new file mode 100644
index 0000000000..026859b6c7
--- /dev/null
+++ b/meta/classes-recipe/cpan_build.bbclass
@@ -0,0 +1,47 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This is for perl modules that use the new Build.PL build system
9#
10inherit cpan-base perlnative
11
12EXTRA_CPAN_BUILD_FLAGS ?= ""
13
14# Env var which tells perl if it should use host (no) or target (yes) settings
15export PERLCONFIGTARGET = "${@is_target(d)}"
16export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
17export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
18export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
19export LD = "${CCLD}"
20
21cpan_build_do_configure () {
22 if [ "${@is_target(d)}" = "yes" ]; then
23 # build for target
24 . ${STAGING_LIBDIR}/perl5/config.sh
25 fi
26
27 perl Build.PL --installdirs vendor --destdir ${D} \
28 ${EXTRA_CPAN_BUILD_FLAGS}
29
30 # Build.PLs can exit with success without generating a
31 # Build, e.g. in cases of missing configure time
32 # dependencies. This is considered a best practice by
33 # cpantesters.org. See:
34 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
35 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
36 [ -e Build ] || bbfatal "No Build was generated by Build.PL"
37}
38
39cpan_build_do_compile () {
40 perl Build --perl "${bindir}/perl" verbose=1
41}
42
43cpan_build_do_install () {
44 perl Build install --destdir ${D}
45}
46
47EXPORT_FUNCTIONS do_configure do_compile do_install
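
The Build.PL flavour is used the same way; only the inherited class and the flag variable differ (names hypothetical):

    # libbaz-perl_2.0.bb
    SRC_URI = "https://cpan.metacpan.org/authors/id/X/XY/XYZ/Baz-${PV}.tar.gz"
    EXTRA_CPAN_BUILD_FLAGS = "--config optimize='${CFLAGS}'"
    inherit cpan_build
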
diff --git a/meta/classes-recipe/cross-canadian.bbclass b/meta/classes-recipe/cross-canadian.bbclass
new file mode 100644
index 0000000000..1670217d69
--- /dev/null
+++ b/meta/classes-recipe/cross-canadian.bbclass
@@ -0,0 +1,200 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6# NOTE - When using this class the user is responsible for ensuring that
7# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
8# is changed, another nativesdk xxx-canadian-cross can be installed
9#
10
11
12# SDK packages are built either explicitly by the user,
13# or indirectly via dependency. No need to be in 'world'.
14EXCLUDE_FROM_WORLD = "1"
15NATIVESDKLIBC ?= "libc-glibc"
16LIBCOVERRIDE = ":${NATIVESDKLIBC}"
17CLASSOVERRIDE = "class-cross-canadian"
18STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
19
20#
21# Update PACKAGE_ARCH and PACKAGE_ARCHS
22#
23PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
24BASECANADIANEXTRAOS ?= "linux-musl"
25CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
26CANADIANEXTRAVENDOR = ""
27MODIFYTOS ??= "1"
28python () {
29 archs = d.getVar('PACKAGE_ARCHS').split()
30 sdkarchs = []
31 for arch in archs:
32 sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
33 d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
34
35 # Allow the following code segment to be disabled, e.g. by meta-environment
36 if d.getVar("MODIFYTOS") != "1":
37 return
38
39 if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]:
40 return
41
42 tos = d.getVar("TARGET_OS")
43 tos_known = ["mingw32"]
44 extralibcs = [""]
45 if "musl" in d.getVar("BASECANADIANEXTRAOS"):
46 extralibcs.append("musl")
47 if "android" in tos:
48 extralibcs.append("android")
49 for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
50 for libc in extralibcs:
51 entry = "linux"
52 if variant and libc:
53 entry = entry + "-" + libc + variant
54 elif variant:
55 entry = entry + "-gnu" + variant
56 elif libc:
57 entry = entry + "-" + libc
58 tos_known.append(entry)
59 if tos not in tos_known:
60 bb.fatal("Building cross-canadian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
61
62 for n in ["PROVIDES", "DEPENDS"]:
63 d.setVar(n, d.getVar(n))
64 d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN"))
65 for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
66 n = prefix + "_FOR_TARGET"
67 d.setVar(n, d.getVar(n))
68 # This is a bit ugly. We need to zero the LIBC/ABI extension, which will change TARGET_OS;
69 # however we need the old value in some variables, so we expand those here first.
70 tarch = d.getVar("TARGET_ARCH")
71 if tarch == "x86_64":
72 d.setVar("LIBCEXTENSION", "")
73 d.setVar("ABIEXTENSION", "")
74 d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
75 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
76 d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
77 elif tarch == "powerpc":
78 # PowerPC can build "linux" and "linux-gnuspe"
79 d.setVar("LIBCEXTENSION", "")
80 d.setVar("ABIEXTENSION", "")
81 d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
82 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
83 d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
84 elif tarch == "mips64":
85 d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
86 for extraos in d.getVar("BASECANADIANEXTRAOS").split():
87 d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
88 if tarch == "arm" or tarch == "armeb":
89 d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi")
90 d.setVar("TARGET_OS", "linux-gnueabi")
91 else:
92 d.setVar("TARGET_OS", "linux")
93
94 # Also need to handle multilib target vendors
95 vendors = d.getVar("CANADIANEXTRAVENDOR")
96 if not vendors:
97 vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
98 origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL")
99 if origvendor:
100 d.setVar("TARGET_VENDOR", origvendor)
101 if origvendor not in vendors.split():
102 vendors = origvendor + " " + vendors
103 d.setVar("CANADIANEXTRAVENDOR", vendors)
104}
105MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
106
107INHIBIT_DEFAULT_DEPS = "1"
108
109STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
110
111TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
112
113PATH:append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
114PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
115
116HOST_ARCH = "${SDK_ARCH}"
117HOST_VENDOR = "${SDK_VENDOR}"
118HOST_OS = "${SDK_OS}"
119HOST_PREFIX = "${SDK_PREFIX}"
120HOST_CC_ARCH = "${SDK_CC_ARCH}"
121HOST_LD_ARCH = "${SDK_LD_ARCH}"
122HOST_AS_ARCH = "${SDK_AS_ARCH}"
123
124#assign DPKG_ARCH
125DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
126
127CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
128CFLAGS = "${BUILDSDK_CFLAGS}"
129CXXFLAGS = "${BUILDSDK_CFLAGS}"
130LDFLAGS = "${BUILDSDK_LDFLAGS} \
131 -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
132 -Wl,-rpath,${libdir}/.. "
133
134#
135# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
136# binaries
137#
138DEPENDS:append = " chrpath-replacement-native"
139EXTRANATIVEPATH += "chrpath-native"
140
141# Path mangling needed by the cross packaging
142# Note that we use := here to ensure that libdir and includedir are
143# target paths.
144target_base_prefix := "${base_prefix}"
145target_prefix := "${prefix}"
146target_exec_prefix := "${exec_prefix}"
147target_base_libdir = "${target_base_prefix}/${baselib}"
148target_libdir = "${target_exec_prefix}/${baselib}"
149target_includedir := "${includedir}"
150
151# Change to place files in SDKPATH
152base_prefix = "${SDKPATHNATIVE}"
153prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
154exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
155bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
156sbindir = "${bindir}"
157base_bindir = "${bindir}"
158base_sbindir = "${bindir}"
159libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
160libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
161
162FILES:${PN} = "${prefix}"
163
164export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${exec_prefix}/lib/pkgconfig"
165export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
166
167do_populate_sysroot[stamp-extra-info] = ""
168do_packagedata[stamp-extra-info] = ""
169
170USE_NLS = "${SDKUSE_NLS}"
171
172# We have to use TARGET_ARCH, but we only care about the absolute value
173# and not any particular tune that is enabled.
174TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
175
176PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
177# If MLPREFIX is set by multilib code, shlibs
178# points to the wrong place so force it
179SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
180SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
181
182cross_canadian_bindirlinks () {
183 for i in linux ${CANADIANEXTRAOS}
184 do
185 for v in ${CANADIANEXTRAVENDOR}
186 do
187 d=${D}${bindir}/../${TARGET_ARCH}$v-$i
188 if [ -d $d ];
189 then
190 continue
191 fi
192 install -d $d
193 for j in `ls ${D}${bindir}`
194 do
195 p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,`
196 ln -s ../${TARGET_SYS}/$j $d/$p
197 done
198 done
199 done
200}
diff --git a/meta/classes-recipe/cross.bbclass b/meta/classes-recipe/cross.bbclass
new file mode 100644
index 0000000000..93de9a5274
--- /dev/null
+++ b/meta/classes-recipe/cross.bbclass
@@ -0,0 +1,103 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit relocatable
8
9# Cross packages are built indirectly via dependency,
10# no need for them to be a direct target of 'world'
11EXCLUDE_FROM_WORLD = "1"
12
13CLASSOVERRIDE = "class-cross"
14PACKAGES = ""
15PACKAGES_DYNAMIC = ""
16PACKAGES_DYNAMIC:class-native = ""
17
18HOST_ARCH = "${BUILD_ARCH}"
19HOST_VENDOR = "${BUILD_VENDOR}"
20HOST_OS = "${BUILD_OS}"
21HOST_PREFIX = "${BUILD_PREFIX}"
22HOST_CC_ARCH = "${BUILD_CC_ARCH}"
23HOST_LD_ARCH = "${BUILD_LD_ARCH}"
24HOST_AS_ARCH = "${BUILD_AS_ARCH}"
25
26# No strip sysroot when DEBUG_BUILD is enabled
27INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
28
29export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
30
31STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
32
33PACKAGE_ARCH = "${BUILD_ARCH}"
34
35MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
36
37export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
38export PKG_CONFIG_SYSROOT_DIR = ""
39
40TARGET_CPPFLAGS = ""
41TARGET_CFLAGS = ""
42TARGET_CXXFLAGS = ""
43TARGET_LDFLAGS = ""
44
45CPPFLAGS = "${BUILD_CPPFLAGS}"
46CFLAGS = "${BUILD_CFLAGS}"
47CXXFLAGS = "${BUILD_CFLAGS}"
48LDFLAGS = "${BUILD_LDFLAGS}"
49
50TOOLCHAIN_OPTIONS = ""
51
52# This class encodes staging paths into its scripts data so it can only be
53# reused if we manipulate the paths.
54SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
55
56# Path mangling needed by the cross packaging
57# Note that we use := here to ensure that libdir and includedir are
58# target paths.
59target_base_prefix := "${root_prefix}"
60target_prefix := "${prefix}"
61target_exec_prefix := "${exec_prefix}"
62target_base_libdir = "${target_base_prefix}/${baselib}"
63target_libdir = "${target_exec_prefix}/${baselib}"
64target_includedir := "${includedir}"
65
66# Overrides for paths
67CROSS_TARGET_SYS_DIR = "${TARGET_SYS}"
68prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
69base_prefix = "${STAGING_DIR_NATIVE}"
70exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
71bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
72sbindir = "${bindir}"
73base_bindir = "${bindir}"
74base_sbindir = "${bindir}"
75libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
76libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
77
78do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
79do_packagedata[stamp-extra-info] = ""
80
81USE_NLS = "no"
82
83export CC = "${BUILD_CC}"
84export CXX = "${BUILD_CXX}"
85export FC = "${BUILD_FC}"
86export CPP = "${BUILD_CPP}"
87export LD = "${BUILD_LD}"
88export CCLD = "${BUILD_CCLD}"
89export AR = "${BUILD_AR}"
90export AS = "${BUILD_AS}"
91export RANLIB = "${BUILD_RANLIB}"
92export STRIP = "${BUILD_STRIP}"
93export NM = "${BUILD_NM}"
94
95inherit nopackages
96
97python do_addto_recipe_sysroot () {
98 bb.build.exec_func("extend_recipe_sysroot", d)
99}
100addtask addto_recipe_sysroot after do_populate_sysroot
101do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
102
103PATH:prepend = "${COREBASE}/scripts/cross-intercept:"
diff --git a/meta/classes-recipe/crosssdk.bbclass b/meta/classes-recipe/crosssdk.bbclass
new file mode 100644
index 0000000000..824b1bcff4
--- /dev/null
+++ b/meta/classes-recipe/crosssdk.bbclass
@@ -0,0 +1,57 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit cross
8
9CLASSOVERRIDE = "class-crosssdk"
10NATIVESDKLIBC ?= "libc-glibc"
11LIBCOVERRIDE = ":${NATIVESDKLIBC}"
12MACHINEOVERRIDES = ""
13PACKAGE_ARCH = "${SDK_ARCH}"
14
15python () {
16 # set TUNE_PKGARCH to SDK_ARCH
17 d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
18 # Set features here to prevent appends and distro features backfill
19 # from modifying nativesdk distro features
20 features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
21 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
22 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
23}
24
25STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
26
27# This class encodes staging paths into its scripts data so it can only be
28# reused if we manipulate the paths.
29SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
30
31TARGET_ARCH = "${SDK_ARCH}"
32TARGET_VENDOR = "${SDK_VENDOR}"
33TARGET_OS = "${SDK_OS}"
34TARGET_PREFIX = "${SDK_PREFIX}"
35TARGET_CC_ARCH = "${SDK_CC_ARCH}"
36TARGET_LD_ARCH = "${SDK_LD_ARCH}"
37TARGET_AS_ARCH = "${SDK_AS_ARCH}"
38TARGET_CPPFLAGS = ""
39TARGET_CFLAGS = ""
40TARGET_CXXFLAGS = ""
41TARGET_LDFLAGS = ""
42TARGET_FPU = ""
43
44
45target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
46target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
47target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
48target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
49target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
50baselib = "lib"
51
52do_packagedata[stamp-extra-info] = ""
53
54# Need to force this to ensure consistency across architectures
55EXTRA_OECONF_GCC_FLOAT = ""
56
57USE_NLS = "no"
diff --git a/meta/classes-recipe/deploy.bbclass b/meta/classes-recipe/deploy.bbclass
new file mode 100644
index 0000000000..f56fe98d6d
--- /dev/null
+++ b/meta/classes-recipe/deploy.bbclass
@@ -0,0 +1,18 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
8SSTATETASKS += "do_deploy"
9do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
10do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
11
12python do_deploy_setscene () {
13 sstate_setscene(d)
14}
15addtask do_deploy_setscene
16do_deploy[dirs] = "${B}"
17do_deploy[cleandirs] = "${DEPLOYDIR}"
18do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
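
A recipe opts in by inheriting the class, installing its artifacts into ${DEPLOYDIR}, and wiring the task into the build (a common sketch; the binary name is hypothetical):

    inherit deploy

    do_deploy() {
        install -Dm 0644 ${B}/firmware.bin ${DEPLOYDIR}/firmware.bin
    }
    addtask deploy after do_compile before do_build

The sstate-inputdirs/outputdirs flags above then publish ${DEPLOYDIR} into ${DEPLOY_DIR_IMAGE}.
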
diff --git a/meta/classes-recipe/devicetree.bbclass b/meta/classes-recipe/devicetree.bbclass
new file mode 100644
index 0000000000..ac1d284ccd
--- /dev/null
+++ b/meta/classes-recipe/devicetree.bbclass
@@ -0,0 +1,154 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This bbclass implements device tree compilation for user-provided device tree
8# sources. The compilation of the device tree sources is the same as the kernel
9# device tree compilation process; this includes being able to include sources
10# from the kernel such as soc dtsi files or header files such as gpio.h. In
11# addition to device trees this bbclass also handles compilation of device tree
12# overlays.
13#
14# The output of this class behaves similarly to how kernel-devicetree.bbclass
15# operates in that the output files are installed into /boot/devicetree.
16# However this class deliberately separates the deployed device trees into the
17# 'devicetree' subdirectory. This prevents clashes with the kernel-devicetree
18# output. Additionally the device trees are populated into the sysroot for
19# access via the sysroot from within other recipes.
20
21SECTION ?= "bsp"
22
23# The default inclusion of kernel device tree includes and headers means that
24# device trees built with them are at least GPL-2.0-only (and in some cases dual
25# licensed). Default to GPL-2.0-only if the recipe does not specify a license.
26LICENSE ?= "GPL-2.0-only"
27LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
28
29INHIBIT_DEFAULT_DEPS = "1"
30DEPENDS += "dtc-native"
31
32inherit deploy kernel-arch
33
34COMPATIBLE_MACHINE ?= "^$"
35
36PROVIDES = "virtual/dtb"
37
38PACKAGE_ARCH = "${MACHINE_ARCH}"
39
40SYSROOT_DIRS += "/boot/devicetree"
41FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
42
43S = "${WORKDIR}"
44B = "${WORKDIR}/build"
45
46# Default kernel includes; these represent what is normally used for in-kernel
47# sources.
48KERNEL_INCLUDE ??= " \
49 ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \
50 ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \
51 ${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \
52 "
53
54DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
55DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
56DT_FILES_PATH[doc] = "Defaults to source directory, can be used to select dts files that are not in source (e.g. generated)."
57DT_FILES_PATH ?= "${S}"
58
59DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, used as extra space typically for additional properties during boot."
60DT_PADDING_SIZE ??= "0x3000"
61DT_RESERVED_MAP[doc] = "Number of reserved map entries."
62DT_RESERVED_MAP ??= "8"
63DT_BOOT_CPU[doc] = "The boot cpu, defaults to 0"
64DT_BOOT_CPU ??= "0"
65
66DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
67DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
68DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@"
69DTC_OFLAGS ?= "-p 0 -@ -H epapr"
70
71python () {
72 if d.getVar("KERNEL_INCLUDE"):
73 # auto add dependency on kernel tree, but only if kernel include paths
74 # are specified.
75 d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure")
76}
77
78def expand_includes(varname, d):
79 import glob
80 includes = set()
81 # expand all includes with glob
82 for i in (d.getVar(varname) or "").split():
83 for g in glob.glob(i):
84 if os.path.isdir(g): # only add directories to include path
85 includes.add(g)
86 return includes
87
88def devicetree_source_is_overlay(path):
89 # determine if a dts file is an overlay by checking if it uses "/plugin/;"
90 with open(path, "r") as f:
91 for i in f:
92 if i.startswith("/plugin/;"):
93 return True
94 return False
95
96def devicetree_compile(dtspath, includes, d):
97 import subprocess
98 dts = os.path.basename(dtspath)
99 dtname = os.path.splitext(dts)[0]
100 bb.note("Processing {0} [{1}]".format(dtname, dts))
101
102 # preprocess
103 ppargs = d.getVar("BUILD_CPP").split()
104 ppargs += (d.getVar("DTC_PPFLAGS") or "").split()
105 for i in includes:
106 ppargs.append("-I{0}".format(i))
107 ppargs += ["-o", "{0}.pp".format(dts), dtspath]
108 bb.note("Running {0}".format(" ".join(ppargs)))
109 subprocess.run(ppargs, check = True)
110
111 # determine if the file is an overlay or not (using the preprocessed file)
112 isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))
113
114 # compile
115 dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split()
116 if isoverlay:
117 dtcargs += (d.getVar("DTC_OFLAGS") or "").split()
118 else:
119 dtcargs += (d.getVar("DTC_BFLAGS") or "").split()
120 for i in includes:
121 dtcargs += ["-i", i]
122 dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
123 dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
124 bb.note("Running {0}".format(" ".join(dtcargs)))
125 subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
126
127python devicetree_do_compile() {
128 includes = expand_includes("DT_INCLUDE", d)
129 listpath = d.getVar("DT_FILES_PATH")
130 for dts in os.listdir(listpath):
131 dtspath = os.path.join(listpath, dts)
132 try:
133 if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
134 continue # skip non-.dts files and non-overlay files
135 except:
136 continue # skip if can't determine if overlay
137 devicetree_compile(dtspath, includes, d)
138}
139
140devicetree_do_install() {
141 for DTB_FILE in `ls *.dtb *.dtbo`; do
142 install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE}
143 done
144}
145
146devicetree_do_deploy() {
147 for DTB_FILE in `ls *.dtb *.dtbo`; do
148 install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE}
149 done
150}
151addtask deploy before do_build after do_install
152
153EXPORT_FUNCTIONS do_compile do_install do_deploy
154
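A user device tree recipe then only needs to provide sources and declare machine compatibility (all names hypothetical):

    # my-board-dts.bb
    SUMMARY = "Out-of-tree device trees for my-board"
    SRC_URI = "file://my-board.dts file://my-overlay.dts"
    COMPATIBLE_MACHINE = "my-board"
    inherit devicetree

Plain sources are compiled to .dtb, while sources containing "/plugin/;" are detected as overlays and compiled to .dtbo.
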
diff --git a/meta/classes-recipe/devupstream.bbclass b/meta/classes-recipe/devupstream.bbclass
new file mode 100644
index 0000000000..1529cc8fca
--- /dev/null
+++ b/meta/classes-recipe/devupstream.bbclass
@@ -0,0 +1,61 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class for use in BBCLASSEXTEND to make it easier to have a single recipe that
8# can build both stable tarballs and snapshots from upstream source
9# repositories.
10#
11# Usage:
12# BBCLASSEXTEND = "devupstream:target"
13# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
14# SRCREV:class-devupstream = "abcdef"
15#
16# If the first entry in SRC_URI is a git: URL then S is rewritten to
17# WORKDIR/git.
18#
19# There are a few caveats that remain to be solved:
20# - You can only build target and native variants (e.g. devupstream:native);
21# nativesdk variants are not supported.
22# - If the fetcher requires native tools (such as subversion-native) then
23# bitbake won't be able to add them automatically.
24
25python devupstream_virtclass_handler () {
26 # Do nothing if this is inherited, as it's for BBCLASSEXTEND
27 if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
28 bb.error("Don't inherit devupstream, use BBCLASSEXTEND")
29 return
30
31 variant = d.getVar("BBEXTENDVARIANT")
32 if variant not in ("target", "native"):
33 bb.error("Unsupported variant %s. Pass the variant when using devupstream, for example devupstream:target" % variant)
34 return
35
36 # Development releases are never preferred by default
37 d.setVar("DEFAULT_PREFERENCE", "-1")
38
39 src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI")
40 uri = bb.fetch2.URI(src_uri.split()[0])
41
42 if uri.scheme == "git" and not d.getVar("S:class-devupstream"):
43 d.setVar("S", "${WORKDIR}/git")
44
45 # Modify the PV if the recipe hasn't already overridden it
46 pv = d.getVar("PV")
47 proto_marker = "+" + uri.scheme
48 if proto_marker not in pv and not d.getVar("PV:class-devupstream"):
49 d.setVar("PV", pv + proto_marker + "${SRCPV}")
50
51 if variant == "native":
52 pn = d.getVar("PN")
53 d.setVar("PN", "%s-native" % (pn))
54 fn = d.getVar("FILE")
55 bb.parse.BBHandler.inherit("native", fn, 0, d)
56
57 d.appendVar("CLASSOVERRIDE", ":class-devupstream")
58}
59
60addhandler devupstream_virtclass_handler
61devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
diff --git a/meta/classes-recipe/distro_features_check.bbclass b/meta/classes-recipe/distro_features_check.bbclass
new file mode 100644
index 0000000000..1f2674fd6e
--- /dev/null
+++ b/meta/classes-recipe/distro_features_check.bbclass
@@ -0,0 +1,13 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Temporarily provide fallback to the old name of the class
8
9python __anonymous() {
10 bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
11}
12
13inherit features_check
diff --git a/meta/classes-recipe/distrooverrides.bbclass b/meta/classes-recipe/distrooverrides.bbclass
new file mode 100644
index 0000000000..8d9d7cda7d
--- /dev/null
+++ b/meta/classes-recipe/distrooverrides.bbclass
@@ -0,0 +1,38 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Turns certain DISTRO_FEATURES into overrides with the same
8# name plus a df- prefix. Ensures that these special
9# distro features remain set also for native and nativesdk
10# recipes, so that these overrides can also be used there.
11#
12# This makes it simpler to write .bbappends that only change the
13# task signatures of the recipe if the change is really enabled,
14# for example with:
15# do_install:append:df-my-feature () { ... }
16# where "my-feature" is a DISTRO_FEATURE.
17#
18# The class is meant to be used in a layer.conf or distro
19# .inc file with:
20# INHERIT += "distrooverrides"
21# DISTRO_FEATURES_OVERRIDES += "my-feature"
22#
23# Beware that this part of OVERRIDES changes during parsing, so usage
24# of these overrides should be limited to .bb and .bbappend files,
25# because then DISTRO_FEATURES is final.
26
27DISTRO_FEATURES_OVERRIDES ?= ""
28DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
29Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
30
31DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
32DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
33
34# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
35# signature because of this line, then the task dependency on
36# OVERRIDES itself should be fixed. Excluding these two variables
37# with DISTROOVERRIDES[vardepsexclude] would just work around the problem.
38DISTROOVERRIDES .= "${@ ''.join([':df-' + x for x in sorted(set(d.getVar('DISTRO_FEATURES_OVERRIDES').split()) & set((d.getVar('DISTRO_FEATURES') or '').split()))]) }"
diff --git a/meta/classes-recipe/dos2unix.bbclass b/meta/classes-recipe/dos2unix.bbclass
new file mode 100644
index 0000000000..18e89b1cf2
--- /dev/null
+++ b/meta/classes-recipe/dos2unix.bbclass
@@ -0,0 +1,20 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class to convert all CRLF line terminators to LF.
8# Some projects are developed/maintained on Windows and
9# therefore use different line terminators (CRLF) than on
10# Linux (LF), which can cause annoying patching errors during
11# git push/checkout processes.
12
13do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot"
14
15# Convert CRLF line terminators to LF
16do_convert_crlf_to_lf () {
17 find ${S} -type f -exec dos2unix {} \;
18}
19
20addtask convert_crlf_to_lf after do_unpack before do_patch
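
Usage is a one-line inherit in any recipe whose upstream ships CRLF sources (sketch):

    inherit dos2unix
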
diff --git a/meta/classes-recipe/externalsrc.bbclass b/meta/classes-recipe/externalsrc.bbclass
new file mode 100644
index 0000000000..51dbe9ea5a
--- /dev/null
+++ b/meta/classes-recipe/externalsrc.bbclass
@@ -0,0 +1,269 @@
1# Copyright (C) 2012 Linux Foundation
2# Author: Richard Purdie
3# Some code and influence taken from srctree.bbclass:
4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
5#
6# SPDX-License-Identifier: MIT
7#
8# externalsrc.bbclass enables use of an existing source tree, usually external to
9# the build system to build a piece of software rather than the usual fetch/unpack/patch
10# process.
11#
12# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
13# directory you want to use containing the sources e.g. from local.conf for a recipe
14# called "myrecipe" you would do:
15#
16# INHERIT += "externalsrc"
17# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
18#
19# In order to make this class work for both target and native versions (or with
20# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
21# directory under the work directory (split source and build directories). This is
22# the default, but the build directory can be set to the source directory if
23# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
24#
25# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
26#
27
28SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
29EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
30
31python () {
32 externalsrc = d.getVar('EXTERNALSRC')
33 externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
34
35 if externalsrc and not externalsrc.startswith("/"):
36 bb.error("EXTERNALSRC must be an absolute path")
37 if externalsrcbuild and not externalsrcbuild.startswith("/"):
38 bb.error("EXTERNALSRC_BUILD must be an absolute path")
39
40 # If this is the base recipe and EXTERNALSRC is set for it or any of its
41 # derivatives, then enable BB_DONT_CACHE to force the recipe to always be
42 # re-parsed so that the file-checksums function for do_compile is run every
43 # time.
44 bpn = d.getVar('BPN')
45 classextend = (d.getVar('BBCLASSEXTEND') or '').split()
46 if bpn == d.getVar('PN') or not classextend:
47 if (externalsrc or
48 ('native' in classextend and
49 d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
50 ('nativesdk' in classextend and
51 d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
52 ('cross' in classextend and
53 d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
54 d.setVar('BB_DONT_CACHE', '1')
55
56 if externalsrc:
57 import oe.recipeutils
58 import oe.path
59
60 d.setVar('S', externalsrc)
61 if externalsrcbuild:
62 d.setVar('B', externalsrcbuild)
63 else:
64 d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
65
66 local_srcuri = []
67 fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
68 for url in fetch.urls:
69 url_data = fetch.ud[url]
70 parm = url_data.parm
71 if (url_data.type == 'file' or
72 url_data.type == 'npmsw' or url_data.type == 'crate' or
73 'type' in parm and parm['type'] == 'kmeta'):
74 local_srcuri.append(url)
75
76 d.setVar('SRC_URI', ' '.join(local_srcuri))
77
78 # Dummy value because the default function can't be called with blank SRC_URI
79 d.setVar('SRCPV', '999')
80
81 if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
82 d.setVar('CONFIGUREOPT_DEPTRACK', '')
83
84 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
85
86 for task in tasks:
87 if task.endswith("_setscene"):
88 # sstate is never going to work for external source trees, disable it
89 bb.build.deltask(task, d)
90 elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
91 # Since configure will likely touch ${S}, take a lock there so only one task has access at a time
92 d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
93
94 for funcname in [task, "base_" + task, "kernel_" + task]:
95 # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
96 cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(funcname, 'cleandirs', False) or '')
97 setvalue = False
98 for cleandir in cleandirs[:]:
99 if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
100 cleandirs.remove(cleandir)
101 setvalue = True
102 if setvalue:
103 d.setVarFlag(funcname, 'cleandirs', ' '.join(cleandirs))
104
105 fetch_tasks = ['do_fetch', 'do_unpack']
106 # If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
107 # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
108 d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
109
110 for task in d.getVar("SRCTREECOVEREDTASKS").split():
111 if local_srcuri and task in fetch_tasks:
112 continue
113 bb.build.deltask(task, d)
114 if task == 'do_unpack':
115 # The reproducible build create_source_date_epoch_stamp function must
116 # be run after the source is available and before the
117 # do_deploy_source_date_epoch task. In the normal case, it's attached
117 # to do_unpack as a postfunc, but since we removed do_unpack (above)
119 # we need to move the function elsewhere. The easiest thing to do is
120 # move it into the prefuncs of the do_deploy_source_date_epoch task.
121 # This is safe, as externalsrc runs with the source already unpacked.
122 d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
123
124 d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
125 d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
126
127 d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
128 d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
129
130 # We don't want the workdir to go away
131 d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
132
133 bb.build.addtask('do_buildclean',
134 'do_clean' if d.getVar('S') == d.getVar('B') else None,
135 None, d)
136
137 # If B=S the same builddir is used even for different architectures.
138 # Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
139 # change of do_configure task hash is correctly detected and stamps are
140 # invalidated if e.g. MACHINE changes.
141 if d.getVar('S') == d.getVar('B'):
142 configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
143 d.setVar('CONFIGURESTAMPFILE', configstamp)
144 d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
145 d.setVar('STAMPCLEAN', '${STAMPS_DIR}/work-shared/${PN}/*-*')
146}
147
148python externalsrc_configure_prefunc() {
149 s_dir = d.getVar('S')
150 # Create desired symlinks
151 symlinks = (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()
152 newlinks = []
153 for symlink in symlinks:
154 symsplit = symlink.split(':', 1)
155 lnkfile = os.path.join(s_dir, symsplit[0])
156 if len(symsplit) > 1:
157 target = d.expand(symsplit[1])
158 if os.path.islink(lnkfile):
159 # Link already exists, leave it if it points to the right location already
160 if os.readlink(lnkfile) == target:
161 continue
162 os.unlink(lnkfile)
163 elif os.path.exists(lnkfile):
164 # File/dir exists with same name as link, just leave it alone
165 continue
166 os.symlink(target, lnkfile)
167 newlinks.append(symsplit[0])
168 # Hide the symlinks from git
169 try:
170 git_exclude_file = os.path.join(s_dir, '.git/info/exclude')
171 if os.path.exists(git_exclude_file):
172 with open(git_exclude_file, 'r+') as efile:
173 elines = efile.readlines()
174 for link in newlinks:
175 if link in elines or '/'+link in elines:
176 continue
177 efile.write('/' + link + '\n')
178 except IOError as ioe:
179 bb.note('Failed to hide EXTERNALSRC_SYMLINKS from git: %s' % ioe)
180}
181
182python externalsrc_compile_prefunc() {
183 # Make it obvious that this is happening, since forgetting about it could lead to much confusion
184 bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN'), d.getVar('EXTERNALSRC')))
185}
186
187do_buildclean[dirs] = "${S} ${B}"
188do_buildclean[nostamp] = "1"
189do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
190externalsrc_do_buildclean() {
191 if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
192 rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
193 if [ "${CLEANBROKEN}" != "1" ]; then
194 oe_runmake clean || die "make failed"
195 fi
196 else
197 bbnote "nothing to do - no makefile found"
198 fi
199}
200
201def srctree_hash_files(d, srcdir=None):
202 import shutil
203 import subprocess
204 import tempfile
205 import hashlib
206
207 s_dir = srcdir or d.getVar('EXTERNALSRC')
208 git_dir = None
209
210 try:
211 git_dir = os.path.join(s_dir,
212 subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
213 top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
214 stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
215 if git_dir == top_git_dir:
216 git_dir = None
217 except subprocess.CalledProcessError:
218 pass
219
220 ret = " "
221 if git_dir is not None:
222 oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
223 with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
224 # Clone index
225 shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
226 # Update our custom index
227 env = os.environ.copy()
228 env['GIT_INDEX_FILE'] = tmp_index.name
229 subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
230 git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
231 submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
232 for line in submodule_helper.splitlines():
233 module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
234 if os.path.isdir(module_dir):
235 proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
236 proc.communicate()
237 proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
238 stdout, _ = proc.communicate()
239 git_sha1 += stdout.decode("utf-8")
240 sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
241 with open(oe_hash_file, 'w') as fobj:
242 fobj.write(sha1)
243 ret = oe_hash_file + ':True'
244 else:
245 ret = s_dir + '/*:True'
246 return ret
247
248def srctree_configure_hash_files(d):
249 """
250 Get the list of files that should trigger do_configure to re-execute,
251 based on the value of CONFIGURE_FILES
252 """
253 in_files = (d.getVar('CONFIGURE_FILES') or '').split()
254 out_items = []
255 search_files = []
256 for entry in in_files:
257 if entry.startswith('/'):
258 out_items.append('%s:%s' % (entry, os.path.exists(entry)))
259 else:
260 search_files.append(entry)
261 if search_files:
262 s_dir = d.getVar('EXTERNALSRC')
263 for root, _, files in os.walk(s_dir):
264 for f in files:
265 if f in search_files:
266 out_items.append('%s:True' % os.path.join(root, f))
267 return ' '.join(out_items)
268
269EXPORT_FUNCTIONS do_buildclean
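
Besides the global INHERIT shown in the header, a single recipe can also opt in directly (a sketch; paths hypothetical):

    inherit externalsrc
    EXTERNALSRC = "/path/to/my/source/tree"
    EXTERNALSRC_BUILD = "/path/to/my/source/tree"
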
diff --git a/meta/classes-recipe/features_check.bbclass b/meta/classes-recipe/features_check.bbclass
new file mode 100644
index 0000000000..163a7bc3fc
--- /dev/null
+++ b/meta/classes-recipe/features_check.bbclass
@@ -0,0 +1,57 @@
1# Allow checking of required and conflicting features
2#
3# xxx = [DISTRO,MACHINE,COMBINED,IMAGE]
4#
5# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
6# in xxx_FEATURES.
7# REQUIRED_xxx_FEATURES: ensure every item on this list is included
8# in xxx_FEATURES.
9# CONFLICT_xxx_FEATURES: ensure no item in this list is included in
10# xxx_FEATURES.
11#
12# Copyright 2019 (C) Texas Instruments Inc.
13# Copyright 2013 (C) O.S. Systems Software LTDA.
14#
15# SPDX-License-Identifier: MIT
16
17
18python () {
19 if d.getVar('PARSE_ALL_RECIPES', False):
20 return
21
22 unused = True
23
24 for kind in ['DISTRO', 'MACHINE', 'COMBINED', 'IMAGE']:
25 if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and not d.hasOverrides('ANY_OF_' + kind + '_FEATURES') and \
26 d.getVar('REQUIRED_' + kind + '_FEATURES') is None and not d.hasOverrides('REQUIRED_' + kind + '_FEATURES') and \
27 d.getVar('CONFLICT_' + kind + '_FEATURES') is None and not d.hasOverrides('CONFLICT_' + kind + '_FEATURES'):
28 continue
29
30 unused = False
31
32 # Assume at least one var is set.
33 features = set((d.getVar(kind + '_FEATURES') or '').split())
34
35 any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split())
36 if any_of_features:
37 if set.isdisjoint(any_of_features, features):
38 raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES"
39 % (' '.join(any_of_features), kind))
40
41 required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split())
42 if required_features:
43 missing = set.difference(required_features, features)
44 if missing:
45 raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)"
46 % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind))
47
48 conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split())
49 if conflict_features:
50 conflicts = set.intersection(conflict_features, features)
51 if conflicts:
52 raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)"
53 % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind))
54
55 if unused:
56 bb.warn("Recipe inherits features_check but doesn't use it")
57}
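
Typical usage in a recipe (the feature names are examples):

    inherit features_check
    REQUIRED_DISTRO_FEATURES = "x11 opengl"
    CONFLICT_DISTRO_FEATURES = "wayland"
    ANY_OF_MACHINE_FEATURES = "screen touchscreen"

If any constraint fails, the recipe is skipped at parse time with the corresponding SkipRecipe message.
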
diff --git a/meta/classes-recipe/fontcache.bbclass b/meta/classes-recipe/fontcache.bbclass
new file mode 100644
index 0000000000..0d496b72dd
--- /dev/null
+++ b/meta/classes-recipe/fontcache.bbclass
@@ -0,0 +1,63 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class will generate the proper postinst/postrm scriptlets for font
9# packages.
10#
11
12PACKAGE_WRITE_DEPS += "qemu-native"
13inherit qemu
14
15FONT_PACKAGES ??= "${PN}"
16FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
17FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
18FONTCONFIG_CACHE_PARAMS ?= "-v"
19# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues.
20# Something has to be set, because qemuwrapper uses this variable after -E.
21# Multiple variables aren't allowed, because for qemu they are separated
22# by commas, while in the -n "$D" case they should be separated by spaces.
23FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
24fontcache_common() {
25if [ -n "$D" ] ; then
26 $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
27 'bindir="${bindir}"' \
28 'libdir="${libdir}"' \
29 'libexecdir="${libexecdir}"' \
30 'base_libdir="${base_libdir}"' \
31 'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
32 'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
33 'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"'
34else
35 ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS}
36fi
37}
38
39python () {
40 font_pkgs = d.getVar('FONT_PACKAGES').split()
41 deps = d.getVar("FONT_EXTRA_RDEPENDS")
42
43 for pkg in font_pkgs:
44 if deps: d.appendVar('RDEPENDS:' + pkg, ' '+deps)
45}
46
47python add_fontcache_postinsts() {
48 for pkg in d.getVar('FONT_PACKAGES').split():
49 bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
50 postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
51 if not postinst:
52 postinst = '#!/bin/sh\n'
53 postinst += d.getVar('fontcache_common')
54 d.setVar('pkg_postinst:%s' % pkg, postinst)
55
56 postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
57 if not postrm:
58 postrm = '#!/bin/sh\n'
59 postrm += d.getVar('fontcache_common')
60 d.setVar('pkg_postrm:%s' % pkg, postrm)
61}
62
63PACKAGEFUNCS =+ "add_fontcache_postinsts"
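
A font recipe only needs to inherit the class; every package listed in FONT_PACKAGES (${PN} by default) gets the cache scriptlets (sketch):

    inherit fontcache
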
diff --git a/meta/classes-recipe/fs-uuid.bbclass b/meta/classes-recipe/fs-uuid.bbclass
new file mode 100644
index 0000000000..a9e7eb8c67
--- /dev/null
+++ b/meta/classes-recipe/fs-uuid.bbclass
@@ -0,0 +1,30 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Extract UUID from ${ROOTFS}, which must have been built
8# by the time that this function gets called. Only works
9# on ext file systems and depends on tune2fs.
10def get_rootfs_uuid(d):
11 import subprocess
12 rootfs = d.getVar('ROOTFS')
13 output = subprocess.check_output(['tune2fs', '-l', rootfs]).decode('utf-8')
14 for line in output.split('\n'):
15 if line.startswith('Filesystem UUID:'):
16 uuid = line.split()[-1]
17 bb.note('UUID of %s: %s' % (rootfs, uuid))
18 return uuid
19 bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
20
21# Replace the special <<uuid-of-rootfs>> inside a string (like the
22# root= APPEND string in a syslinux.cfg or systemd-boot entry) with the
23# actual UUID of the rootfs. Does nothing if the special string
24# is not used.
25def replace_rootfs_uuid(d, string):
26 UUID_PLACEHOLDER = '<<uuid-of-rootfs>>'
27 if UUID_PLACEHOLDER in string:
28 uuid = get_rootfs_uuid(d)
29 string = string.replace(UUID_PLACEHOLDER, uuid)
30 return string
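
A sketch of the intended use from a bootloader/image recipe: put the placeholder into the kernel command line and pass the string through replace_rootfs_uuid() (the APPEND value is an example):

    APPEND = "root=UUID=<<uuid-of-rootfs>> rw"
    # at image build time, e.g. in a python task:
    #   append = replace_rootfs_uuid(d, d.getVar('APPEND'))
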
diff --git a/meta/classes-recipe/gconf.bbclass b/meta/classes-recipe/gconf.bbclass
new file mode 100644
index 0000000000..b81851bc78
--- /dev/null
+++ b/meta/classes-recipe/gconf.bbclass
@@ -0,0 +1,77 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS += "gconf"
8PACKAGE_WRITE_DEPS += "gconf-native"
9
10# These are for when gconftool is used natively and the prefix isn't necessarily
11# the sysroot. TODO: replicate the postinst logic for -native packages going
12# into sysroot as they won't be running their own install-time schema
13# registration (disabled below) nor the postinst script (which never runs for them).
14export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
15export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
16
17# Disable install-time schema registration: we're a packaging system, so this
18# happens in the postinst script, not at install time. Set both the configure
19# script option and the traditional environment variable just to make sure.
20EXTRA_OECONF += "--disable-schemas-install"
21export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
22
23gconf_postinst() {
24if [ "x$D" != "x" ]; then
25 export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
26else
27 export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
28fi
29
30SCHEMA_LOCATION=$D/etc/gconf/schemas
31for SCHEMA in ${SCHEMA_FILES}; do
32 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
33 HOME=$D/root gconftool-2 \
34 --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
35 fi
36done
37}
38
39gconf_prerm() {
40SCHEMA_LOCATION=/etc/gconf/schemas
41for SCHEMA in ${SCHEMA_FILES}; do
42 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
43 HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
44 gconftool-2 \
45 --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
46 fi
47done
48}
49
50python populate_packages:append () {
51 import re
52 packages = d.getVar('PACKAGES').split()
53 pkgdest = d.getVar('PKGDEST')
54
55 for pkg in packages:
56 schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
57 schemas = []
58 schema_re = re.compile(r".*\.schemas$")
59 if os.path.exists(schema_dir):
60 for f in os.listdir(schema_dir):
61 if schema_re.match(f):
62 schemas.append(f)
63 if schemas != []:
64 bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
65 d.setVar('SCHEMA_FILES', " ".join(schemas))
66 postinst = d.getVar('pkg_postinst:%s' % pkg)
67 if not postinst:
68 postinst = '#!/bin/sh\n'
69 postinst += d.getVar('gconf_postinst')
70 d.setVar('pkg_postinst:%s' % pkg, postinst)
71 prerm = d.getVar('pkg_prerm:%s' % pkg)
72 if not prerm:
73 prerm = '#!/bin/sh\n'
74 prerm += d.getVar('gconf_prerm')
75 d.setVar('pkg_prerm:%s' % pkg, prerm)
76 d.appendVar("RDEPENDS:%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
77}
diff --git a/meta/classes-recipe/gettext.bbclass b/meta/classes-recipe/gettext.bbclass
new file mode 100644
index 0000000000..c313885d52
--- /dev/null
+++ b/meta/classes-recipe/gettext.bbclass
@@ -0,0 +1,28 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def gettext_dependencies(d):
8 if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
9 return ""
10 if d.getVar('USE_NLS') == 'no':
11 return "gettext-minimal-native"
12 return "gettext-native"
13
14def gettext_oeconf(d):
15 if d.getVar('USE_NLS') == 'no':
16 return '--disable-nls'
17 # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
18 if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
19 return '--disable-nls'
20 return "--enable-nls"
21
22BASEDEPENDS:append = " ${@gettext_dependencies(d)}"
23EXTRA_OECONF:append = " ${@gettext_oeconf(d)}"
24
25# Without this, msgfmt from gettext-native will not find ITS files
26# provided by target recipes (for example, polkit.its).
27GETTEXTDATADIRS:append:class-target = ":${STAGING_DATADIR}/gettext"
28export GETTEXTDATADIRS
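
To make the helpers concrete, here is how they resolve under the two common configurations; this is a sketch derived from the code above, not additional behaviour.

    inherit gettext

    # USE_NLS = "no":
    #   BASEDEPENDS gains gettext-minimal-native
    #   EXTRA_OECONF gains --disable-nls
    # USE_NLS = "yes" (and default deps not inhibited):
    #   BASEDEPENDS gains gettext-native
    #   EXTRA_OECONF gains --enable-nls
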
diff --git a/meta/classes-recipe/gi-docgen.bbclass b/meta/classes-recipe/gi-docgen.bbclass
new file mode 100644
index 0000000000..8b7eaacea3
--- /dev/null
+++ b/meta/classes-recipe/gi-docgen.bbclass
@@ -0,0 +1,30 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# gi-docgen is a new gnome documentation generator, which
8# seems to be a successor to gtk-doc:
9# https://gitlab.gnome.org/GNOME/gi-docgen
10
11# This variable is set to True if api-documentation is in
12# DISTRO_FEATURES, and False otherwise.
13GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'True', 'False', d)}"
14# When building native recipes, disable gi-docgen, as it is not necessary,
15# pulls in additional dependencies, and makes build times longer
16GIDOCGEN_ENABLED:class-native = "False"
17GIDOCGEN_ENABLED:class-nativesdk = "False"
18
19# meson: default option name to enable/disable gi-docgen. This matches most
20# projects' configuration. If in doubt, check meson_options.txt in the
21# project's source tree.
22GIDOCGEN_MESON_OPTION ?= 'gtk_doc'
23GIDOCGEN_MESON_ENABLE_FLAG ?= 'true'
24GIDOCGEN_MESON_DISABLE_FLAG ?= 'false'
25
26# Auto enable/disable based on GIDOCGEN_ENABLED
27EXTRA_OEMESON:prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} "
28
29DEPENDS:append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}"
30
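
When a project uses a different meson option name, the knob can be overridden per recipe; the option name below is hypothetical.

    inherit gi-docgen

    # Suppose the project's meson_options.txt declares option('documentation', ...):
    GIDOCGEN_MESON_OPTION = "documentation"
    # EXTRA_OEMESON then receives -Ddocumentation=true or =false,
    # depending on GIDOCGEN_ENABLED.
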
diff --git a/meta/classes-recipe/gio-module-cache.bbclass b/meta/classes-recipe/gio-module-cache.bbclass
new file mode 100644
index 0000000000..d12e03c4a0
--- /dev/null
+++ b/meta/classes-recipe/gio-module-cache.bbclass
@@ -0,0 +1,44 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PACKAGE_WRITE_DEPS += "qemu-native"
8inherit qemu
9
10GIO_MODULE_PACKAGES ??= "${PN}"
11
12gio_module_cache_common() {
13if [ "x$D" != "x" ]; then
14 $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \
15 mlprefix=${MLPREFIX} \
16 binprefix=${MLPREFIX} \
17 libdir=${libdir} \
18 libexecdir=${libexecdir} \
19 base_libdir=${base_libdir} \
20 bindir=${bindir}
21else
22 ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/
23fi
24}
25
26python populate_packages:append () {
27 packages = d.getVar('GIO_MODULE_PACKAGES').split()
28
29 for pkg in packages:
30 bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
31
32 postinst = d.getVar('pkg_postinst:%s' % pkg)
33 if not postinst:
34 postinst = '#!/bin/sh\n'
35 postinst += d.getVar('gio_module_cache_common')
36 d.setVar('pkg_postinst:%s' % pkg, postinst)
37
38 postrm = d.getVar('pkg_postrm:%s' % pkg)
39 if not postrm:
40 postrm = '#!/bin/sh\n'
41 postrm += d.getVar('gio_module_cache_common')
42 d.setVar('pkg_postrm:%s' % pkg, postrm)
43}
44
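
A minimal sketch of a recipe shipping a GIO module; the module name is hypothetical.

    inherit gio-module-cache

    # Point the class at the packages that actually ship modules
    # (it defaults to ${PN}):
    GIO_MODULE_PACKAGES = "${PN}"
    # libgioexample.so is an illustrative module name
    FILES:${PN} += "${libdir}/gio/modules/libgioexample.so"
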
diff --git a/meta/classes-recipe/glide.bbclass b/meta/classes-recipe/glide.bbclass
new file mode 100644
index 0000000000..21b48fa4e0
--- /dev/null
+++ b/meta/classes-recipe/glide.bbclass
@@ -0,0 +1,15 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Handle Glide Vendor Package Management use
8#
9# Copyright 2018 (C) O.S. Systems Software LTDA.
10
11DEPENDS:append = " glide-native"
12
13do_compile:prepend() {
14 ( cd ${B}/src/${GO_IMPORT} && glide install )
15}
diff --git a/meta/classes-recipe/gnomebase.bbclass b/meta/classes-recipe/gnomebase.bbclass
new file mode 100644
index 0000000000..805daafa40
--- /dev/null
+++ b/meta/classes-recipe/gnomebase.bbclass
@@ -0,0 +1,37 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def gnome_verdir(v):
8 return ".".join(v.split(".")[:-1])
9
10
11GNOME_COMPRESS_TYPE ?= "xz"
12SECTION ?= "x11/gnome"
13GNOMEBN ?= "${BPN}"
14SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
15
16FILES:${PN} += "${datadir}/application-registry \
17 ${datadir}/mime-info \
18 ${datadir}/mime/packages \
19 ${datadir}/mime/application \
20 ${datadir}/gnome-2.0 \
21 ${datadir}/polkit* \
22 ${datadir}/GConf \
23 ${datadir}/glib-2.0/schemas \
24 ${datadir}/appdata \
25 ${datadir}/icons \
26"
27
28FILES:${PN}-doc += "${datadir}/devhelp"
29
30GNOMEBASEBUILDCLASS ??= "autotools"
31inherit ${GNOMEBASEBUILDCLASS} pkgconfig
32
33do_install:append() {
34 rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
35 rm -rf ${D}${localstatedir}/scrollkeeper/*
36 rm -f ${D}${datadir}/applications/*.cache
37}
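
gnome_verdir() simply drops the last version component, matching the GNOME mirror layout; a worked example follows (version numbers illustrative, checksum elided).

    # gnome_verdir("3.38.2") -> "3.38", so with PV = "3.38.2" SRC_URI becomes
    # ${GNOME_MIRROR}/${GNOMEBN}/3.38/${GNOMEBN}-3.38.2.tar.xz
    inherit gnomebase
    SRC_URI[archive.sha256sum] = "<checksum>"
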
diff --git a/meta/classes-recipe/go-mod.bbclass b/meta/classes-recipe/go-mod.bbclass
new file mode 100644
index 0000000000..927746a338
--- /dev/null
+++ b/meta/classes-recipe/go-mod.bbclass
@@ -0,0 +1,26 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Handle Go Modules support
8#
9# When using Go Modules, the current working directory MUST be at or below
10# the location of the 'go.mod' file when the go tool is used, and there is no
11# way to tell it to look elsewhere. It will automatically look upwards for the
12# file, but not downwards.
13#
14# To support this use case, we provide the `GO_WORKDIR` variable, which defaults
15# to `GO_IMPORT` but allows for easy override.
16#
17# Copyright 2020 (C) O.S. Systems Software LTDA.
18
19# The '-modcacherw' option ensures we have write access to the cached objects, so
20# we avoid errors during the clean task as well as when removing TMPDIR.
21GOBUILDFLAGS:append = " -modcacherw"
22
23inherit go
24
25GO_WORKDIR ?= "${GO_IMPORT}"
26do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
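
A hedged sketch of a consuming recipe; the import path and branch are illustrative.

    # github.com/example/hello is an invented import path
    GO_IMPORT = "github.com/example/hello"
    SRC_URI = "git://${GO_IMPORT};branch=main;protocol=https"

    inherit go-mod

    # do_compile now runs in ${B}/src/${GO_WORKDIR}, i.e. next to go.mod,
    # and GOBUILDFLAGS carries -modcacherw so the module cache stays removable.
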
diff --git a/meta/classes-recipe/go-ptest.bbclass b/meta/classes-recipe/go-ptest.bbclass
new file mode 100644
index 0000000000..54fcbb535d
--- /dev/null
+++ b/meta/classes-recipe/go-ptest.bbclass
@@ -0,0 +1,60 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit go ptest
8
9do_compile_ptest_base() {
10 export TMPDIR="${GOTMPDIR}"
11 rm -f ${B}/.go_compiled_tests.list
12 go_list_package_tests | while read pkg; do
13 cd ${B}/src/$pkg
14 ${GO} test ${GOPTESTBUILDFLAGS} $pkg
15 find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
16 sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
17 done
18 do_compile_ptest
19}
20
21do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
22
23go_make_ptest_wrapper() {
24 cat >${D}${PTEST_PATH}/run-ptest <<EOF
25#!/bin/sh
26RC=0
27run_test() (
28 cd "\$1"
29 ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
30 exit \$?)
31EOF
32
33}
34
35do_install_ptest_base() {
36 test -f "${B}/.go_compiled_tests.list" || exit 0
37 install -d ${D}${PTEST_PATH}
38 go_stage_testdata
39 go_make_ptest_wrapper
40 havetests=""
41 while read test; do
42 testdir=`dirname $test`
43 testprog=`basename $test`
44 install -d ${D}${PTEST_PATH}/$testdir
45 install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
46 echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
47 havetests="yes"
48 done < ${B}/.go_compiled_tests.list
49 if [ -n "$havetests" ]; then
50 echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
51 chmod +x ${D}${PTEST_PATH}/run-ptest
52 else
53 rm -rf ${D}${PTEST_PATH}
54 fi
55 do_install_ptest
56 chown -R root:root ${D}${PTEST_PATH}
57}
58
59INSANE_SKIP:${PN}-ptest += "ldflags"
60
diff --git a/meta/classes-recipe/go.bbclass b/meta/classes-recipe/go.bbclass
new file mode 100644
index 0000000000..6b9748406d
--- /dev/null
+++ b/meta/classes-recipe/go.bbclass
@@ -0,0 +1,170 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit goarch
8inherit linuxloader
9
10GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
11
12export GODEBUG = "gocachehash=1"
13
14GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go"
15GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
16GOROOT = "${STAGING_LIBDIR}/go"
17export GOROOT
18export GOROOT_FINAL = "${libdir}/go"
19export GOCACHE = "${B}/.cache"
20
21export GOARCH = "${TARGET_GOARCH}"
22export GOOS = "${TARGET_GOOS}"
23export GOHOSTARCH="${BUILD_GOARCH}"
24export GOHOSTOS="${BUILD_GOOS}"
25
26GOARM[export] = "0"
27GOARM:arm:class-target = "${TARGET_GOARM}"
28GOARM:arm:class-target[export] = "1"
29
30GO386[export] = "0"
31GO386:x86:class-target = "${TARGET_GO386}"
32GO386:x86:class-target[export] = "1"
33
34GOMIPS[export] = "0"
35GOMIPS:mips:class-target = "${TARGET_GOMIPS}"
36GOMIPS:mips:class-target[export] = "1"
37
38DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
39DEPENDS_GOLANG:class-native = "go-native"
40DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
41
42DEPENDS:append = " ${DEPENDS_GOLANG}"
43
44GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
45GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
46GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
47GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
48GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
49GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
50GO_LINKMODE ?= ""
51GO_LINKMODE:class-nativesdk = "--linkmode=external"
52GO_LINKMODE:class-native = "--linkmode=external"
53GO_EXTRA_LDFLAGS ?= ""
54GO_LINUXLOADER ?= "-I ${@get_linuxloader(d)}"
55# Use system loader. If uninative is used, the uninative loader will be patched automatically
56GO_LINUXLOADER:class-native = ""
57GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} ${GO_LINUXLOADER} ${GO_EXTRA_LDFLAGS} -extldflags '${GO_EXTLDFLAGS}'"'
58export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
59export GOPATH_OMIT_IN_ACTIONID ?= "1"
60export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
61export GOPTESTFLAGS ?= ""
62GOBUILDFLAGS:prepend:task-compile = "${GO_PARALLEL_BUILD} "
63
64export GO = "${HOST_PREFIX}go"
65GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
66GOTOOLDIR:class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
67export GOTOOLDIR
68
69export CGO_ENABLED ?= "1"
70export CGO_CFLAGS ?= "${CFLAGS}"
71export CGO_CPPFLAGS ?= "${CPPFLAGS}"
72export CGO_CXXFLAGS ?= "${CXXFLAGS}"
73export CGO_LDFLAGS ?= "${LDFLAGS}"
74
75GO_INSTALL ?= "${GO_IMPORT}/..."
76GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
77
78B = "${WORKDIR}/build"
79export GOPATH = "${B}"
80export GOENV = "off"
81export GOTMPDIR ?= "${WORKDIR}/build-tmp"
82GOTMPDIR[vardepvalue] = ""
83
84python go_do_unpack() {
85 src_uri = (d.getVar('SRC_URI') or "").split()
86 if len(src_uri) == 0:
87 return
88
89 fetcher = bb.fetch2.Fetch(src_uri, d)
90 for url in fetcher.urls:
91 if fetcher.ud[url].type == 'git':
92 if fetcher.ud[url].parm.get('destsuffix') is None:
93 s_dirname = os.path.basename(d.getVar('S'))
94 fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
95 fetcher.unpack(d.getVar('WORKDIR'))
96}
97
98go_list_packages() {
99 ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
100 egrep -v '${GO_INSTALL_FILTEROUT}'
101}
102
103go_list_package_tests() {
104 ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
105 grep -v '\[\]$' | \
106 egrep -v '${GO_INSTALL_FILTEROUT}' | \
107 awk '{ print $1 }'
108}
109
110go_do_configure() {
111 ln -snf ${S}/src ${B}/
112}
113do_configure[dirs] =+ "${GOTMPDIR}"
114
115go_do_compile() {
116 export TMPDIR="${GOTMPDIR}"
117 if [ -n "${GO_INSTALL}" ]; then
118 if [ -n "${GO_LINKSHARED}" ]; then
119 ${GO} install ${GOBUILDFLAGS} `go_list_packages`
120 rm -rf ${B}/bin
121 fi
122 ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
123 fi
124}
125do_compile[dirs] =+ "${GOTMPDIR}"
126do_compile[cleandirs] = "${B}/bin ${B}/pkg"
127
128go_do_install() {
129 install -d ${D}${libdir}/go/src/${GO_IMPORT}
130 tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
131 tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
132 tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
133 tar -C ${D}${libdir}/go --no-same-owner -xf -
134
135 if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
136 install -d ${D}${bindir}
137 install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
138 fi
139}
140
141go_stage_testdata() {
142 oldwd="$PWD"
143 cd ${S}/src
144 find ${GO_IMPORT} -depth -type d -name testdata | while read d; do
145 if echo "$d" | grep -q '/vendor/'; then
146 continue
147 fi
148 parent=`dirname $d`
149 install -d ${D}${PTEST_PATH}/$parent
150 cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/
151 done
152 cd "$oldwd"
153}
154
155EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
156
157FILES:${PN}-dev = "${libdir}/go/src"
158FILES:${PN}-staticdev = "${libdir}/go/pkg"
159
160INSANE_SKIP:${PN} += "ldflags"
161
162# Add -buildmode=pie to GOBUILDFLAGS to satisfy the "textrel" QA check, but mips
163# doesn't support -buildmode=pie, so skip the QA check for mips/rv32 and their
164# variants.
165python() {
166 if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
167 d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel")
168 else:
169 d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
170}
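
GO_INSTALL defaults to everything under GO_IMPORT; a recipe can narrow it, as in this hypothetical sketch.

    # import path and command name are illustrative
    GO_IMPORT = "github.com/example/tool"

    inherit go

    # Build only the main command instead of ${GO_IMPORT}/...:
    GO_INSTALL = "${GO_IMPORT}/cmd/tool"
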
diff --git a/meta/classes-recipe/goarch.bbclass b/meta/classes-recipe/goarch.bbclass
new file mode 100644
index 0000000000..61ead30a63
--- /dev/null
+++ b/meta/classes-recipe/goarch.bbclass
@@ -0,0 +1,122 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}"
8BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
9BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
10HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
11HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
12HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
13HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
14HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
15HOST_GOARM:class-native = "7"
16HOST_GO386:class-native = "sse2"
17HOST_GOMIPS:class-native = "hardfloat"
18HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
19TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
20TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
21TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
22TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
23TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
24TARGET_GOARM:class-native = "7"
25TARGET_GO386:class-native = "sse2"
26TARGET_GOMIPS:class-native = "hardfloat"
27TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
28GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
29
30# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
31# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
32BASE_GOARM = ''
33BASE_GOARM:armv7ve = '7'
34BASE_GOARM:armv7a = '7'
35BASE_GOARM:armv6 = '6'
36BASE_GOARM:armv5 = '5'
37
38# Go supports dynamic linking on a limited set of architectures.
39# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
40GO_DYNLINK = ""
41GO_DYNLINK:arm ?= "1"
42GO_DYNLINK:aarch64 ?= "1"
43GO_DYNLINK:x86 ?= "1"
44GO_DYNLINK:x86-64 ?= "1"
45GO_DYNLINK:powerpc64 ?= "1"
46GO_DYNLINK:powerpc64le ?= "1"
47GO_DYNLINK:class-native ?= ""
48GO_DYNLINK:class-nativesdk = ""
49
50# define here because everybody inherits this class
51#
52COMPATIBLE_HOST:linux-gnux32 = "null"
53COMPATIBLE_HOST:linux-muslx32 = "null"
54COMPATIBLE_HOST:powerpc = "null"
55COMPATIBLE_HOST:powerpc64 = "null"
56COMPATIBLE_HOST:mipsarchn32 = "null"
57
58ARM_INSTRUCTION_SET:armv4 = "arm"
59ARM_INSTRUCTION_SET:armv5 = "arm"
60ARM_INSTRUCTION_SET:armv6 = "arm"
61
62TUNE_CCARGS:remove = "-march=mips32r2"
63SECURITY_NOPIE_CFLAGS ??= ""
64
65# go can't be built with ccache:
66# gcc: fatal error: no input files
67CCACHE_DISABLE ?= "1"
68
69def go_map_arch(a, d):
70 import re
71 if re.match('i.86', a):
72 return '386'
73 elif a == 'x86_64':
74 return 'amd64'
75 elif re.match('arm.*', a):
76 return 'arm'
77 elif re.match('aarch64.*', a):
78 return 'arm64'
79 elif re.match('mips64el.*', a):
80 return 'mips64le'
81 elif re.match('mips64.*', a):
82 return 'mips64'
83 elif a == 'mips':
84 return 'mips'
85 elif a == 'mipsel':
86 return 'mipsle'
87 elif re.match('p(pc|owerpc)(64le)', a):
88 return 'ppc64le'
89 elif re.match('p(pc|owerpc)(64)', a):
90 return 'ppc64'
91 elif a == 'riscv64':
92 return 'riscv64'
93 else:
94 raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
95
96def go_map_arm(a, d):
97 if a.startswith("arm"):
98 return d.getVar('BASE_GOARM')
99 return ''
100
101def go_map_386(a, f, d):
102 import re
103 if re.match('i.86', a):
104 if ('core2' in f) or ('corei7' in f):
105 return 'sse2'
106 else:
107 return 'softfloat'
108 return ''
109
110def go_map_mips(a, f, d):
111 import re
112 if a == 'mips' or a == 'mipsel':
113 if 'fpu-hard' in f:
114 return 'hardfloat'
115 else:
116 return 'softfloat'
117 return ''
118
119def go_map_os(o, d):
120 if o.startswith('linux'):
121 return 'linux'
122 return o
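
Worked examples of the mapping helpers, following directly from the code above:

    # go_map_arch("x86_64")       -> "amd64"
    # go_map_arch("aarch64")      -> "arm64"
    # go_map_arch("mipsel")       -> "mipsle"
    # go_map_386("i686", "corei7 ...") -> "sse2" (otherwise "softfloat")
    # go_map_mips("mips", "fpu-hard ...") -> "hardfloat"
    # go_map_os("linux-gnueabi")  -> "linux"
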
diff --git a/meta/classes-recipe/gobject-introspection-data.bbclass b/meta/classes-recipe/gobject-introspection-data.bbclass
new file mode 100644
index 0000000000..7f522a1ed3
--- /dev/null
+++ b/meta/classes-recipe/gobject-introspection-data.bbclass
@@ -0,0 +1,18 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This variable is set to True if gobject-introspection-data is in
8# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
9#
10# It should be used in recipes to determine whether introspection data should be built,
11# so that qemu use can be avoided when necessary.
12GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
13 bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
14
15do_compile:prepend() {
16 # This prevents g-ir-scanner from writing cache data to $HOME
17 export GI_SCANNER_DISABLE_CACHE=1
18}
diff --git a/meta/classes-recipe/gobject-introspection.bbclass b/meta/classes-recipe/gobject-introspection.bbclass
new file mode 100644
index 0000000000..0c7b7d200a
--- /dev/null
+++ b/meta/classes-recipe/gobject-introspection.bbclass
@@ -0,0 +1,61 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Inherit this class in recipes to enable building their introspection files
8
9# python3native is inherited to prevent introspection tools being run with
10# host's python 3 (they need to be run with native python 3)
11#
12# This also sets up autoconf-based recipes to build introspection data (or not),
13# depending on distro and machine features (see gobject-introspection-data class).
14inherit python3native gobject-introspection-data
15
16# meson: default option name to enable/disable introspection. This matches most
17# projects' configuration. If in doubt, check meson_options.txt in the
18# project's source tree.
19GIR_MESON_OPTION ?= 'introspection'
20GIR_MESON_ENABLE_FLAG ?= 'true'
21GIR_MESON_DISABLE_FLAG ?= 'false'
22
23# Define g-i options such that they can be disabled completely when GIR_MESON_OPTION is empty
24GIRMESONTARGET = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
25GIRMESONBUILD = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
26# Auto enable/disable based on GI_DATA_ENABLED
27EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
28EXTRA_OEMESON:prepend:class-target = "${@['', '${GIRMESONTARGET}'][d.getVar('GIR_MESON_OPTION') != '']}"
29# When building native recipes, disable introspection, as it is not necessary,
30# pulls in additional dependencies, and makes build times longer
31EXTRA_OECONF:prepend:class-native = "--disable-introspection "
32EXTRA_OECONF:prepend:class-nativesdk = "--disable-introspection "
33EXTRA_OEMESON:prepend:class-native = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
34EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
35
36# Generating introspection data depends on a combination of native and target
37# introspection tools, and qemu to run the target tools.
38DEPENDS:append:class-target = " gobject-introspection gobject-introspection-native qemu-native"
39
40# Even though introspection is disabled on -native, gobject-introspection package is still
41# needed for m4 macros.
42DEPENDS:append:class-native = " gobject-introspection-native"
43DEPENDS:append:class-nativesdk = " gobject-introspection-native"
44
45# This is used by introspection tools to find .gir includes
46export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
47
48do_configure:prepend:class-target () {
49 # introspection.m4 pre-packaged with upstream tarballs does not yet
50 # have our fixes
51 mkdir -p ${S}/m4
52 cp ${STAGING_DIR_TARGET}/${datadir}/aclocal/introspection.m4 ${S}/m4
53}
54
55# .typelib files are needed at runtime and so they go to the main package (so
56# they'll be together with libraries they support).
57FILES:${PN}:append = " ${libdir}/girepository-*/*.typelib"
58
59# .gir files go to the dev package, as they're needed for developing (but not for
60# running) things that depend on introspection.
61FILES:${PN}-dev:append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
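
As with the gi-docgen class, the meson option name can be overridden per recipe; 'gir' below is a hypothetical option name.

    inherit gobject-introspection

    GIR_MESON_OPTION = "gir"
    # GI_DATA_ENABLED == "True"  -> meson gets -Dgir=true
    # GI_DATA_ENABLED == "False" -> meson gets -Dgir=false
    # GIR_MESON_OPTION = ""      -> no -D flag is passed at all
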
diff --git a/meta/classes-recipe/grub-efi-cfg.bbclass b/meta/classes-recipe/grub-efi-cfg.bbclass
new file mode 100644
index 0000000000..52e85a3bb0
--- /dev/null
+++ b/meta/classes-recipe/grub-efi-cfg.bbclass
@@ -0,0 +1,122 @@
1# grub-efi.bbclass
2# Copyright (c) 2011, Intel Corporation.
3#
4# SPDX-License-Identifier: MIT
5
6# Provide grub-efi specific functions for building bootable images.
7
8# External variables
9# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
10# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
11# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
12# ${LABELS} - a list of targets for the automatic config
13# ${APPEND} - an override list of append strings for each label
14# ${GRUB_OPTS} - additional options to add to the config, ';'-delimited (optional)
15# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
16# ${GRUB_ROOT} - grub's root device.
17
18GRUB_SERIAL ?= "console=ttyS0,115200"
19GRUB_CFG_VM = "${S}/grub_vm.cfg"
20GRUB_CFG_LIVE = "${S}/grub_live.cfg"
21GRUB_TIMEOUT ?= "10"
22#FIXME: build this from the machine config
23GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
24
25GRUB_ROOT ?= "${ROOT}"
26APPEND ?= ""
27
28# Uses MACHINE specific KERNEL_IMAGETYPE
29PACKAGE_ARCH = "${MACHINE_ARCH}"
30
31# Need UUID utility code.
32inherit fs-uuid
33
34python build_efi_cfg() {
35 import sys
36
37 workdir = d.getVar('WORKDIR')
38 if not workdir:
39 bb.error("WORKDIR not defined, unable to package")
40 return
41
42 gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
43
44 labels = d.getVar('LABELS')
45 if not labels:
46 bb.debug(1, "LABELS not defined, nothing to do")
47 return
48
49 if labels == []:
50 bb.debug(1, "No labels, nothing to do")
51 return
52
53 cfile = d.getVar('GRUB_CFG')
54 if not cfile:
55 bb.fatal('Unable to read GRUB_CFG')
56
57 try:
58 cfgfile = open(cfile, 'w')
59 except OSError:
60 bb.fatal('Unable to open %s' % cfile)
61
62 cfgfile.write('# Automatically created by OE\n')
63
64 opts = d.getVar('GRUB_OPTS')
65 if opts:
66 for opt in opts.split(';'):
67 cfgfile.write('%s\n' % opt)
68
69 cfgfile.write('default=%s\n' % (labels.split()[0]))
70
71 timeout = d.getVar('GRUB_TIMEOUT')
72 if timeout:
73 cfgfile.write('timeout=%s\n' % timeout)
74 else:
75 cfgfile.write('timeout=50\n')
76
77 root = d.getVar('GRUB_ROOT')
78 if not root:
79 bb.fatal('GRUB_ROOT not defined')
80
81 if gfxserial == "1":
82 btypes = [ [ " graphics console", "" ],
83 [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
84 else:
85 btypes = [ [ "", "" ] ]
86
87 for label in labels.split():
88 localdata = d.createCopy()
89
90 overrides = localdata.getVar('OVERRIDES')
91 if not overrides:
92 bb.fatal('OVERRIDES not defined')
93
94 localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)
95
96 for btype in btypes:
97 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
98 lb = label
99 if label == "install":
100 lb = "install-efi"
101 kernel = localdata.getVar('KERNEL_IMAGETYPE')
102 cfgfile.write('linux /%s LABEL=%s' % (kernel, lb))
103
104 cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
105
106 append = localdata.getVar('APPEND')
107 initrd = localdata.getVar('INITRD')
108
109 if append:
110 append = replace_rootfs_uuid(d, append)
111 cfgfile.write(' %s' % (append))
112
113 cfgfile.write(' %s' % btype[1])
114 cfgfile.write('\n')
115
116 if initrd:
117 cfgfile.write('initrd /initrd')
118 cfgfile.write('\n}\n')
119
120 cfgfile.close()
121}
122build_efi_cfg[vardepsexclude] += "OVERRIDES"
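
A sketch of the image-side variables build_efi_cfg() consumes; the values are illustrative.

    LABELS = "boot install"
    APPEND = "rootwait console=ttyS0,115200"
    GRUB_ROOT = "root=/dev/sda2"
    GRUB_TIMEOUT = "5"
    # build_efi_cfg() then writes one menuentry per label into ${GRUB_CFG},
    # plus a serial-console variant of each when GRUB_GFXSERIAL = "1".
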
diff --git a/meta/classes-recipe/grub-efi.bbclass b/meta/classes-recipe/grub-efi.bbclass
new file mode 100644
index 0000000000..4afd12195f
--- /dev/null
+++ b/meta/classes-recipe/grub-efi.bbclass
@@ -0,0 +1,14 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit grub-efi-cfg
8require conf/image-uefi.conf
9
10efi_populate() {
11 efi_populate_common "$1" grub-efi
12
13 install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
14}
diff --git a/meta/classes-recipe/gsettings.bbclass b/meta/classes-recipe/gsettings.bbclass
new file mode 100644
index 0000000000..adb027ea0a
--- /dev/null
+++ b/meta/classes-recipe/gsettings.bbclass
@@ -0,0 +1,48 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# A bbclass to handle installed GSettings (glib) schemas, updating the compiled
8# form on package install and removal.
9#
10# The compiled schemas are platform-agnostic, so we can depend on
11# glib-2.0-native for the native tool and run the postinst script when the
12# rootfs builds to save a little time on first boot.
13
14# TODO use a trigger so that this runs once per package operation run
15
16GSETTINGS_PACKAGE ?= "${PN}"
17
18python __anonymous() {
19 pkg = d.getVar("GSETTINGS_PACKAGE")
20 if pkg:
21 d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
22 d.appendVar("RDEPENDS:" + pkg, " ${MLPREFIX}glib-2.0-utils")
23 d.appendVar("FILES:" + pkg, " ${datadir}/glib-2.0/schemas")
24}
25
26gsettings_postinstrm () {
27 glib-compile-schemas $D${datadir}/glib-2.0/schemas
28}
29
30python populate_packages:append () {
31 pkg = d.getVar('GSETTINGS_PACKAGE')
32 if pkg:
33 bb.note("adding gsettings postinst scripts to %s" % pkg)
34
35 postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
36 if not postinst:
37 postinst = '#!/bin/sh\n'
38 postinst += d.getVar('gsettings_postinstrm')
39 d.setVar('pkg_postinst:%s' % pkg, postinst)
40
41 bb.note("adding gsettings postrm scripts to %s" % pkg)
42
43 postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
44 if not postrm:
45 postrm = '#!/bin/sh\n'
46 postrm += d.getVar('gsettings_postinstrm')
47 d.setVar('pkg_postrm:%s' % pkg, postrm)
48}
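
Usage sketch; the sub-package name is hypothetical.

    inherit gsettings

    # Defaults to ${PN}; point it elsewhere if the schemas are split out:
    GSETTINGS_PACKAGE = "${PN}-data"
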
diff --git a/meta/classes-recipe/gtk-doc.bbclass b/meta/classes-recipe/gtk-doc.bbclass
new file mode 100644
index 0000000000..68fa2cc745
--- /dev/null
+++ b/meta/classes-recipe/gtk-doc.bbclass
@@ -0,0 +1,89 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Helper class to pull in the right gtk-doc dependencies and configure
9# gtk-doc to enable or disable documentation building (which requires the
9# use of usermode qemu).
10
11# This variable is set to True if api-documentation is in
12# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
13#
14# It should be used in recipes to determine whether gtk-doc based documentation should be built,
15# so that qemu use can be avoided when necessary.
16GTKDOC_ENABLED:class-native = "False"
17GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
18 bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
19
20# meson: default option name to enable/disable gtk-doc. This matches most
21# projects' configuration. If in doubt, check meson_options.txt in the
22# project's source tree.
23GTKDOC_MESON_OPTION ?= 'docs'
24GTKDOC_MESON_ENABLE_FLAG ?= 'true'
25GTKDOC_MESON_DISABLE_FLAG ?= 'false'
26
27# Auto enable/disable based on GTKDOC_ENABLED
28EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
29 '--disable-gtk-doc', d)} "
30EXTRA_OEMESON:prepend:class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
31
32# When building native recipes, disable gtkdoc, as it is not necessary,
33# pulls in additional dependencies, and makes build times longer
34EXTRA_OECONF:prepend:class-native = "--disable-gtk-doc "
35EXTRA_OECONF:prepend:class-nativesdk = "--disable-gtk-doc "
36EXTRA_OEMESON:prepend:class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
37EXTRA_OEMESON:prepend:class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
38
39# Even though gtkdoc is disabled on -native, gtk-doc package is still
40# needed for m4 macros.
41DEPENDS:append = " gtk-doc-native"
42
43# The documentation directory, where the infrastructure will be copied.
44# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
45GTKDOC_DOCDIR ?= "${S}"
46
47export STAGING_DIR_HOST
48
49inherit python3native pkgconfig qemu
50DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
51
52do_configure:prepend () {
53 # Need to use ||true as this is only needed if configure.ac both exists
54 # and uses GTK_DOC_CHECK.
55 gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
56}
57
58do_compile:prepend:class-target () {
59 if [ ${GTKDOC_ENABLED} = True ]; then
60 # Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
61 # can run target helper binaries through that.
62 qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
63 cat > ${B}/gtkdoc-qemuwrapper << EOF
64#!/bin/sh
65# Use a modules directory which doesn't exist so we don't load random things
66# which may then get deleted (or their dependencies) and potentially segfault
67export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
68
69GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
70GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
71
72# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
73unset LD_LIBRARY_PATH
74
75if [ -d ".libs" ]; then
76 $qemu_binary ".libs/\$@"
77else
78 $qemu_binary "\$@"
79fi
80
81if [ \$? -ne 0 ]; then
82 echo "If the above error message is about missing .so libraries, then setting up GIR_EXTRA_LIBS_PATH in the recipe should help."
83 echo "(typically like this: GIR_EXTRA_LIBS_PATH=\"$""{B}/something/.libs\" )"
84 exit 1
85fi
86EOF
87 chmod +x ${B}/gtkdoc-qemuwrapper
88 fi
89}
diff --git a/meta/classes-recipe/gtk-icon-cache.bbclass b/meta/classes-recipe/gtk-icon-cache.bbclass
new file mode 100644
index 0000000000..17c7eb7a33
--- /dev/null
+++ b/meta/classes-recipe/gtk-icon-cache.bbclass
@@ -0,0 +1,95 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7FILES:${PN} += "${datadir}/icons/hicolor"
8
9GTKIC_VERSION ??= '3'
10
11GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
12GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0.0' }"
13
14#gtk+3/gtk4 require GTK3DISTROFEATURES; depending on them makes all
15#recipes that inherit this class require GTK3DISTROFEATURES
16inherit features_check
17ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
18
19DEPENDS +=" ${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \
20 ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \
21 ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \
22 ${GTKPN}-native \
23"
24
25PACKAGE_WRITE_DEPS += "${GTKPN}-native gdk-pixbuf-native"
26
27gtk_icon_cache_postinst() {
28if [ "x$D" != "x" ]; then
29 $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
30 mlprefix=${MLPREFIX} \
31 libdir_native=${libdir_native}
32else
33
34 # Update the pixbuf loaders in case they haven't been registered yet
35 ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
36
37 for icondir in /usr/share/icons/* ; do
38 if [ -d $icondir ] ; then
39 ${GTKIC_CMD} -fqt $icondir
40 fi
41 done
42fi
43}
44
45gtk_icon_cache_postrm() {
46if [ "x$D" != "x" ]; then
47 $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
48 mlprefix=${MLPREFIX} \
49 libdir=${libdir}
50else
51 for icondir in /usr/share/icons/* ; do
52 if [ -d $icondir ] ; then
53 ${GTKIC_CMD} -qt $icondir
54 fi
55 done
56fi
57}
58
59python populate_packages:append () {
60 packages = d.getVar('PACKAGES').split()
61 pkgdest = d.getVar('PKGDEST')
62
63 for pkg in packages:
64 icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir'))
65 if not os.path.exists(icon_dir):
66 continue
67
68 bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
69 rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
70 d.appendVar('RDEPENDS:%s' % pkg, rdepends)
71
72 #gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3/gtk4
73 bb.note("adding gdk-pixbuf dependency to %s" % pkg)
74 rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
75 d.appendVar('RDEPENDS:%s' % pkg, rdepends)
76
77 bb.note("adding %s dependency to %s" % (d.getVar('GTKPN'), pkg))
78 rdepends = ' ' + d.getVar('MLPREFIX', False) + d.getVar('GTKPN')
79 d.appendVar('RDEPENDS:%s' % pkg, rdepends)
80
81 bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
82
83 postinst = d.getVar('pkg_postinst:%s' % pkg)
84 if not postinst:
85 postinst = '#!/bin/sh\n'
86 postinst += d.getVar('gtk_icon_cache_postinst')
87 d.setVar('pkg_postinst:%s' % pkg, postinst)
88
89 postrm = d.getVar('pkg_postrm:%s' % pkg)
90 if not postrm:
91 postrm = '#!/bin/sh\n'
92 postrm += d.getVar('gtk_icon_cache_postrm')
93 d.setVar('pkg_postrm:%s' % pkg, postrm)
94}
95
diff --git a/meta/classes-recipe/gtk-immodules-cache.bbclass b/meta/classes-recipe/gtk-immodules-cache.bbclass
new file mode 100644
index 0000000000..8fbe1dd1fb
--- /dev/null
+++ b/meta/classes-recipe/gtk-immodules-cache.bbclass
@@ -0,0 +1,82 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class will update the inputmethod module cache for virtual keyboards
8#
9# Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the inputmethod modules
10
11PACKAGE_WRITE_DEPS += "qemu-native"
12
13inherit qemu
14
15GTKIMMODULES_PACKAGES ?= "${PN}"
16
17gtk_immodule_cache_postinst() {
18if [ "x$D" != "x" ]; then
19 $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
20 mlprefix=${MLPREFIX} \
21 binprefix=${MLPREFIX} \
22 libdir=${libdir} \
23 libexecdir=${libexecdir} \
24 base_libdir=${base_libdir} \
25 bindir=${bindir}
26else
27 if [ ! -z `which gtk-query-immodules-2.0` ]; then
28 gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
29 fi
30 if [ ! -z `which gtk-query-immodules-3.0` ]; then
31 mkdir -p ${libdir}/gtk-3.0/3.0.0
32 gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
33 fi
34fi
35}
36
37gtk_immodule_cache_postrm() {
38if [ "x$D" != "x" ]; then
39 $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
40 mlprefix=${MLPREFIX} \
41 binprefix=${MLPREFIX} \
42 libdir=${libdir} \
43 libexecdir=${libexecdir} \
44 base_libdir=${base_libdir} \
45 bindir=${bindir}
46else
47 if [ ! -z `which gtk-query-immodules-2.0` ]; then
48 gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
49 fi
50 if [ ! -z `which gtk-query-immodules-3.0` ]; then
51 gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
52 fi
53fi
54}
55
56python populate_packages:append () {
57 gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
58
59 for pkg in gtkimmodules_pkgs:
60 bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
61
62 postinst = d.getVar('pkg_postinst:%s' % pkg)
63 if not postinst:
64 postinst = '#!/bin/sh\n'
65 postinst += d.getVar('gtk_immodule_cache_postinst')
66 d.setVar('pkg_postinst:%s' % pkg, postinst)
67
68 postrm = d.getVar('pkg_postrm:%s' % pkg)
69 if not postrm:
70 postrm = '#!/bin/sh\n'
71 postrm += d.getVar('gtk_immodule_cache_postrm')
72 d.setVar('pkg_postrm:%s' % pkg, postrm)
73}
74
75python __anonymous() {
76 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
77 gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
78 if not gtkimmodules_check:
79 bb_filename = d.getVar('FILE', False)
80 bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
81}
82
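
Usage sketch for an input-method recipe; the package name is hypothetical.

    inherit gtk-immodules-cache

    GTKIMMODULES_PACKAGES = "${PN}-immodule-example"
    # Each listed package gets the postinst/postrm above, which regenerate
    # immodules.cache via the postinst intercept when building a rootfs.
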
diff --git a/meta/classes-recipe/image-artifact-names.bbclass b/meta/classes-recipe/image-artifact-names.bbclass
new file mode 100644
index 0000000000..5c4e746b90
--- /dev/null
+++ b/meta/classes-recipe/image-artifact-names.bbclass
@@ -0,0 +1,28 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7##################################################################
8# Specific image creation and rootfs population info.
9##################################################################
10
11IMAGE_BASENAME ?= "${PN}"
12IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
13IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
14IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
15IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
16
17# IMAGE_NAME is the base name for everything produced when building images.
18# The actual image that contains the rootfs has an additional suffix (.rootfs
19# by default) followed by additional suffixes which describe the format (.ext4,
20# .ext4.xz, etc.).
21IMAGE_NAME_SUFFIX ??= ".rootfs"
22
23python () {
24 if bb.data.inherits_class('deploy', d) and d.getVar("IMAGE_VERSION_SUFFIX") == "-${DATETIME}":
25 import datetime
26 d.setVar("IMAGE_VERSION_SUFFIX", "-" + datetime.datetime.fromtimestamp(int(d.getVar("SOURCE_DATE_EPOCH")), datetime.timezone.utc).strftime('%Y%m%d%H%M%S'))
27 d.setVarFlag("IMAGE_VERSION_SUFFIX", "vardepvalue", "")
28}
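
A worked example of how these pieces compose; all values are illustrative.

    # With IMAGE_BASENAME = "core-image-minimal", MACHINE = "qemux86-64"
    # and DATETIME = "20240101120000":
    #   IMAGE_NAME      = "core-image-minimal-qemux86-64-20240101120000"
    #   IMAGE_LINK_NAME = "core-image-minimal-qemux86-64"
    # and a rootfs artifact would be named e.g. ${IMAGE_NAME}.rootfs.ext4
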
diff --git a/meta/classes-recipe/image-combined-dbg.bbclass b/meta/classes-recipe/image-combined-dbg.bbclass
new file mode 100644
index 0000000000..dcf1968538
--- /dev/null
+++ b/meta/classes-recipe/image-combined-dbg.bbclass
@@ -0,0 +1,15 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image; "
8
9combine_dbg_image () {
10 if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
11 # copy target files into -dbg rootfs, so it can be used for
12 # debug purposes directly
13 tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf -
14 fi
15}
diff --git a/meta/classes-recipe/image-container.bbclass b/meta/classes-recipe/image-container.bbclass
new file mode 100644
index 0000000000..d24b030453
--- /dev/null
+++ b/meta/classes-recipe/image-container.bbclass
@@ -0,0 +1,27 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7ROOTFS_BOOTSTRAP_INSTALL = ""
8IMAGE_TYPES_MASKED += "container"
9IMAGE_TYPEDEP:container = "tar.bz2"
10
11python __anonymous() {
12 if "container" in d.getVar("IMAGE_FSTYPES") and \
13 d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
14 "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"):
15 msg = '"container" is in IMAGE_FSTYPES, but ' \
16 'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \
17 'Unless a particular kernel is needed, using linux-dummy will ' \
18 'prevent a kernel from being built, which can reduce ' \
19 'build times. If you don\'t want to use "linux-dummy", set ' \
20 '"IMAGE_CONTAINER_NO_DUMMY" to "1".'
21
22 # Raising skip recipe was Paul's clever idea. It causes the error to
23 # only be shown for the recipes actually requested to build, rather
24 # than bb.fatal which would appear for all recipes inheriting the
25 # class.
26 raise bb.parse.SkipRecipe(msg)
27}
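
Typical configuration satisfying the check above; a local.conf/image sketch.

    IMAGE_FSTYPES = "container"
    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"

    # Or keep a real kernel and silence the check:
    # IMAGE_CONTAINER_NO_DUMMY = "1"
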
diff --git a/meta/classes-recipe/image-live.bbclass b/meta/classes-recipe/image-live.bbclass
new file mode 100644
index 0000000000..1034acc49e
--- /dev/null
+++ b/meta/classes-recipe/image-live.bbclass
@@ -0,0 +1,265 @@
1# Copyright (C) 2004, Advanced Micro Devices, Inc.
2#
3# SPDX-License-Identifier: MIT
4
5# Creates a bootable image using syslinux, your kernel and an optional
6# initrd
7
8#
9# End result is two things:
10#
11# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
12# an initrd and a rootfs image. These can be written to hard disks directly and
13# also booted from USB flash disks (write them there with dd).
14#
15# 2. A CD .iso image
16
17# The boot process is that the initrd boots and processes which label was
18# selected in syslinux. Actions based on that label are then performed
19# (e.g. installing to a hard disk)
20
21# External variables (also used by syslinux.bbclass)
22# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
23# ${HDDIMG_ID} - FAT image volume-id
24# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
25
26inherit live-vm-common image-artifact-names
27
28do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
29 mtools-native:do_populate_sysroot \
30 cdrtools-native:do_populate_sysroot \
31 virtual/kernel:do_deploy \
32 ${MLPREFIX}syslinux:do_populate_sysroot \
33 syslinux-native:do_populate_sysroot \
34 ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
35 "
36
37
38LABELS_LIVE ?= "boot install"
39ROOT_LIVE ?= "root=/dev/ram0"
40INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
41INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
42
43LIVE_ROOTFS_TYPE ?= "ext4"
44ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
45
46IMAGE_TYPEDEP:live = "${LIVE_ROOTFS_TYPE}"
47IMAGE_TYPEDEP:iso = "${LIVE_ROOTFS_TYPE}"
48IMAGE_TYPEDEP:hddimg = "${LIVE_ROOTFS_TYPE}"
49IMAGE_TYPES_MASKED += "live hddimg iso"
50
51python() {
52 image_b = d.getVar('IMAGE_BASENAME')
53 initrd_i = d.getVar('INITRD_IMAGE_LIVE')
54 if image_b == initrd_i:
55 bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
56 bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
57 elif initrd_i:
58 d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i)
59}
60
61HDDDIR = "${S}/hddimg"
62ISODIR = "${S}/iso"
63EFIIMGDIR = "${S}/efi_img"
64COMPACT_ISODIR = "${S}/iso.z"
65
66ISOLINUXDIR ?= "/isolinux"
67ISO_BOOTIMG = "isolinux/isolinux.bin"
68ISO_BOOTCAT = "isolinux/boot.cat"
69MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
70
71BOOTIMG_VOLUME_ID ?= "boot"
72BOOTIMG_EXTRA_SPACE ?= "512"
73
74populate_live() {
75 populate_kernel $1
76 if [ -s "${ROOTFS}" ]; then
77 install -m 0644 ${ROOTFS} $1/rootfs.img
78 fi
79}
80
81build_iso() {
82 # Only create an ISO if we have an INITRD and the live or iso image type was selected
83 if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then
84 bbnote "ISO image will not be created."
85 return
86 fi
87 # ${INITRD} is a list of multiple filesystem images
88 for fs in ${INITRD}
89 do
90 if [ ! -s "$fs" ]; then
91 bbwarn "ISO image will not be created. $fs is invalid."
92 return
93 fi
94 done
95
96 populate_live ${ISODIR}
97
98 if [ "${PCBIOS}" = "1" ]; then
99 syslinux_iso_populate ${ISODIR}
100 fi
101 if [ "${EFI}" = "1" ]; then
102 efi_iso_populate ${ISODIR}
103 build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
104 fi
105
106 # EFI only
107 if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
108 # Work around a bug in isohybrid where it requires isolinux.bin
109 # in the boot catalog, even though it is not used
110 mkdir -p ${ISODIR}/${ISOLINUXDIR}
111 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
112 fi
113
114 # We used to have support for zisofs; this is a relic of that
115 mkisofs_compress_opts="-r"
116
117 # Check the size of ${ISODIR}/rootfs.img and use mkisofs -iso-level 3
118 # when it exceeds 3.8GB: the ISO9660 limit is 4GB - 1 byte, so we need
119 # to leave some space for other files.
120 mkisofs_iso_level=""
121
122 if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
123 rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
124 # 4080218931 = 3.8 * 1024 * 1024 * 1024
125 if [ $rootfs_img_size -gt 4080218931 ]; then
126 bbnote "${ISODIR}/rootfs.img exceeds 3.8GB, using '-iso-level 3' for mkisofs"
127 mkisofs_iso_level="-iso-level 3"
128 fi
129 fi
130
131 if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
132 # PCBIOS only media
133 mkisofs -V ${BOOTIMG_VOLUME_ID} \
134 -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
135 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
136 $mkisofs_compress_opts \
137 ${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
138 else
139 # EFI only OR EFI+PCBIOS
140 mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
141 -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
142 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
143 $mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
144 -eltorito-alt-boot -eltorito-platform efi \
145 -b efi.img -no-emul-boot \
146 ${ISODIR}
147 isohybrid_args="-u"
148 fi
149
150 isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
151}
152
153build_fat_img() {
154 FATSOURCEDIR=$1
155 FATIMG=$2
156
157 # Calculate the size required for the final image including the
158 # data and filesystem overhead.
159 # Sectors: 512 bytes
160 # Blocks: 1024 bytes
161
162 # Determine the sector count just for the data
163 SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
164
165 # Account for the filesystem overhead. This includes directory
166 # entries in the clusters as well as the FAT itself.
167 # Assumptions:
168 # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
169 # padding will be minimal on those smaller images and not
170 # worth the logic here to calculate the smaller FAT sizes)
171 # < 16 entries per directory
172 # 8.3 filenames only
173
174 # 32 bytes per dir entry
175 DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
176 # 32 bytes for every end-of-directory dir entry
177 DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
178 # 4 bytes per FAT entry per sector of data
179 FAT_BYTES=$(expr $SECTORS \* 4)
180 # 4 bytes per FAT entry per end-of-cluster list
181 FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
182
183 # Use a ceiling function to determine FS overhead in sectors
184 DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
185 # There are two FATs on the image
186 FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
187 SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
188
189 # Determine the final size in blocks accounting for some padding
190 BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
191
192 # mkdosfs will sometimes use FAT16 when it is not appropriate,
193 # resulting in a boot failure from SYSLINUX. Use FAT32 for
194 # images larger than 512MB, otherwise let mkdosfs decide.
195 if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
196 FATSIZE="-F 32"
197 fi
198
199 # mkdosfs will fail if ${FATIMG} exists. Since we are creating a
200 # new image, it is safe to delete any previous image.
201 if [ -e ${FATIMG} ]; then
202 rm ${FATIMG}
203 fi
204
205 if [ -z "${HDDIMG_ID}" ]; then
206 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
207 ${BLOCKS}
208 else
209 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
210 ${BLOCKS} -i ${HDDIMG_ID}
211 fi
212
213 # Copy FATSOURCEDIR recursively into the image file directly
214 mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
215}
216
217build_hddimg() {
218 # Create an HDD image
219 if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then
220 populate_live ${HDDDIR}
221
222 if [ "${PCBIOS}" = "1" ]; then
223 syslinux_hddimg_populate ${HDDDIR}
224 fi
225 if [ "${EFI}" = "1" ]; then
226 efi_hddimg_populate ${HDDDIR}
227 fi
228
229 # Check the size of ${HDDDIR}/rootfs.img and error out if it
230 # exceeds 4GB, the maximum size of a single file on a FAT filesystem.
231 if [ -f ${HDDDIR}/rootfs.img ]; then
232 rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
233 max_size=`expr 4 \* 1024 \* 1024 \* 1024`
234 if [ $rootfs_img_size -ge $max_size ]; then
235 bberror "${HDDDIR}/rootfs.img rootfs size is greater than or equal to 4GB,"
236 bberror "and this doesn't work on a FAT filesystem. You can either:"
237 bberror "1) Reduce the size of rootfs.img, or,"
238 bbfatal "2) Use wic, vmdk, vhd, vhdx or vdi instead of hddimg\n"
239 fi
240 fi
241
242 build_fat_img ${HDDDIR} ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
243
244 if [ "${PCBIOS}" = "1" ]; then
245 syslinux_hddimg_install
246 fi
247
248 chmod 644 ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
249 fi
250}
251
252python do_bootimg() {
253 set_live_vm_vars(d, 'LIVE')
254 if d.getVar("PCBIOS") == "1":
255 bb.build.exec_func('build_syslinux_cfg', d)
256 if d.getVar("EFI") == "1":
257 bb.build.exec_func('build_efi_cfg', d)
258 bb.build.exec_func('build_hddimg', d)
259 bb.build.exec_func('build_iso', d)
260 bb.build.exec_func('create_symlinks', d)
261}
262do_bootimg[subimages] = "hddimg iso"
263do_bootimg[imgsuffix] = "."
264
265addtask bootimg before do_image_complete after do_rootfs
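
Enabling the masked types from an image recipe or local.conf (sketch):

    IMAGE_FSTYPES += "live"
    # do_bootimg then produces ${IMAGE_NAME}.hddimg and, when INITRD is
    # available, ${IMAGE_NAME}.iso in ${IMGDEPLOYDIR}.
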
diff --git a/meta/classes-recipe/image-postinst-intercepts.bbclass b/meta/classes-recipe/image-postinst-intercepts.bbclass
new file mode 100644
index 0000000000..fc15926384
--- /dev/null
+++ b/meta/classes-recipe/image-postinst-intercepts.bbclass
@@ -0,0 +1,29 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Gather existing and candidate postinst intercepts from BBPATH
8POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts"
9POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}"
10
11python find_intercepts() {
12 intercepts = {}
13 search_paths = []
14 paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':')
15 overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + ['']
16 search_paths = [os.path.join(p, op) for p in paths for op in overrides]
17 searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True)
18 files, chksums = [], []
19 for pathname, candidates in searched:
20 if os.path.isfile(pathname):
21 files.append(pathname)
22 chksums.append('%s:True' % pathname)
23 chksums.extend('%s:False' % c for c in candidates[:-1])
24
25 d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums))
26 d.setVar('POSTINST_INTERCEPTS', ' '.join(files))
27}
28find_intercepts[eventmask] += "bb.event.RecipePreFinalise"
29addhandler find_intercepts
diff --git a/meta/classes-recipe/image.bbclass b/meta/classes-recipe/image.bbclass
new file mode 100644
index 0000000000..433172378a
--- /dev/null
+++ b/meta/classes-recipe/image.bbclass
@@ -0,0 +1,684 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7IMAGE_CLASSES ??= ""
8
9# rootfs bootstrap install
10# warning - image-container resets this
11ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
12
13# Handle inherits of any of the image classes we need
14IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
15# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base
16# in the non-Linux SDK_OS case, such as mingw32
17IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
18IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
19IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
20IMGCLASSES += "image_types_wic"
21IMGCLASSES += "rootfs-postcommands"
22IMGCLASSES += "image-postinst-intercepts"
23IMGCLASSES += "overlayfs-etc"
24inherit ${IMGCLASSES}
25
26TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
27TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
28POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
29
30LICENSE ?= "MIT"
31PACKAGES = ""
32DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
33RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
34RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
35PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
36
37INHIBIT_DEFAULT_DEPS = "1"
38
39# IMAGE_FEATURES may contain any available package group
40IMAGE_FEATURES ?= ""
41IMAGE_FEATURES[type] = "list"
42IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging overlayfs-etc"
43
44# Generate companion debugfs?
45IMAGE_GEN_DEBUGFS ?= "0"
46
47# These packages will be installed in addition into the debug rootfs
48IMAGE_INSTALL_DEBUGFS ?= ""
49
50# These packages will be removed from a read-only rootfs after all other
51# packages have been installed
52ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
53
54# packages to install from features
55FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
56FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
57FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
58FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
59
60# Define some very basic feature package groups
61FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
62SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}"
63FEATURE_PACKAGES_splash = "${SPLASH}"
64
65IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
66
67def check_image_features(d):
68 valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
69 valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
70 for var in d:
71 if var.startswith("FEATURE_PACKAGES_"):
72 valid_features.append(var[17:])
73 valid_features.sort()
74
75 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
76 for feature in features:
77 if feature not in valid_features:
78 if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
79 raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
80 else:
81 raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
82
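As the validation above implies, defining any variable named FEATURE_PACKAGES_<name> makes <name> a legal IMAGE_FEATURES entry. A minimal sketch of how a layer might add one (the feature and package names here are hypothetical):

    # In a distro or image .conf -- hypothetical names
    FEATURE_PACKAGES_my-debug-tools = "strace ltrace tcpdump"
    IMAGE_FEATURES += "my-debug-tools"
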
83IMAGE_INSTALL ?= ""
84IMAGE_INSTALL[type] = "list"
85export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
86PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
87
88IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"
89
90# Images are generally built explicitly and do not need to be part of world.
91EXCLUDE_FROM_WORLD = "1"
92
93USE_DEVFS ?= "1"
94USE_DEPMOD ?= "1"
95
96PID = "${@os.getpid()}"
97
98PACKAGE_ARCH = "${MACHINE_ARCH}"
99
100LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
101LDCONFIGDEPEND:libc-musl = ""
102
103# This is needed to have depmod data in PKGDATA_DIR,
104# but if you're building a small initramfs image,
105# e.g. to include it in your kernel, you probably
106# don't want this dependency, as it causes a dependency loop
107KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata"
108
109do_rootfs[depends] += " \
110 makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
111 virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
112 ${KERNELDEPMODDEPEND} \
113"
114do_rootfs[recrdeptask] += "do_packagedata"
115
116def rootfs_command_variables(d):
117 return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
118 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
119
120python () {
121 variables = rootfs_command_variables(d)
122 for var in variables:
123 if d.getVar(var, False):
124 d.setVarFlag(var, 'func', '1')
125}
126
127def rootfs_variables(d):
128 from oe.rootfs import variable_depends
129 variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
130 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
131 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
132 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
133 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
134 variables.extend(rootfs_command_variables(d))
135 variables.extend(variable_depends(d))
136 return " ".join(variables)
137
138do_rootfs[vardeps] += "${@rootfs_variables(d)}"
139
140# This is needed to have kernel image in DEPLOY_DIR.
141# This follows many common use cases and user expectations.
142# But if you are building an image which doesn't need the kernel image at all,
143# you can unset this variable manually.
144KERNEL_DEPLOY_DEPEND ?= "virtual/kernel:do_deploy"
145do_build[depends] += "${KERNEL_DEPLOY_DEPEND}"
146
147
148python () {
149 def extraimage_getdepends(task):
150 deps = ""
151 for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
152 if ":" in dep:
153 deps += " %s " % (dep)
154 else:
155 deps += " %s:%s" % (dep, task)
156 return deps
157
158 d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
159
160 deps = " " + imagetypes_getdepends(d)
161 d.appendVarFlag('do_rootfs', 'depends', deps)
162
163    # Process IMAGE_FEATURES; we must do this before runtime_mapping_rename
164    # Check for replaced image features
165 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
166 remain_features = features.copy()
167 for feature in features:
168 replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
169 remain_features -= replaces
170
171    # Check for conflicting image features
172 for feature in remain_features:
173 conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
174 temp = conflicts & remain_features
175 if temp:
176 bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))
177
178 d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))
179
180 check_image_features(d)
181}
182
183IMAGE_POSTPROCESS_COMMAND ?= ""
184
185# some default locales
186IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
187
188LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
189
190# by default create a locale archive
191IMAGE_LOCALES_ARCHIVE ?= '1'
192
193# Prefer image, but use the fallback files for lookups if the image ones
194# aren't yet available.
195PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
196
197PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
198
199PACKAGE_EXCLUDE ??= ""
200PACKAGE_EXCLUDE[type] = "list"
201
202fakeroot python do_rootfs () {
203 from oe.rootfs import create_rootfs
204 from oe.manifest import create_manifest
205 import logging
206
207 logger = d.getVar('BB_TASK_LOGGER', False)
208 if logger:
209 logcatcher = bb.utils.LogCatcher()
210 logger.addHandler(logcatcher)
211 else:
212 logcatcher = None
213
214 # NOTE: if you add, remove or significantly refactor the stages of this
215 # process then you should recalculate the weightings here. This is quite
216 # easy to do - just change the MultiStageProgressReporter line temporarily
217 # to pass debug=True as the last parameter and you'll get a printout of
218 # the weightings as well as a map to the lines where next_stage() was
219 # called. Of course this isn't critical, but it helps to keep the progress
220 # reporting accurate.
221 stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1]
222 progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights)
223 progress_reporter.next_stage()
224
225 # Handle package exclusions
226 excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
227 inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
228 inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()
229
230 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
231 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
232
233 for pkg in excl_pkgs:
234 if pkg in inst_pkgs:
235 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
236 inst_pkgs.remove(pkg)
237
238 if pkg in inst_attempt_pkgs:
239            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs))
240 inst_attempt_pkgs.remove(pkg)
241
242 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
243 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
244
245 # Ensure we handle package name remapping
246 # We have to delay the runtime_mapping_rename until just before rootfs runs
247 # otherwise, the multilib renaming could step in and squash any fixups that
248 # may have occurred.
249 pn = d.getVar('PN')
250 runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
251 runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
252 runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
253
254 # Generate the initial manifest
255 create_manifest(d)
256
257 progress_reporter.next_stage()
258
259 # generate rootfs
260 d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
261 create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)
262
263 progress_reporter.finish()
264}
265do_rootfs[dirs] = "${TOPDIR}"
266do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}"
267do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
268addtask rootfs after do_prepare_recipe_sysroot
269
270fakeroot python do_image () {
271 from oe.utils import execute_pre_post_process
272
273 d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
274 pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")
275
276 execute_pre_post_process(d, pre_process_cmds)
277}
278do_image[dirs] = "${TOPDIR}"
279addtask do_image after do_rootfs
280
281fakeroot python do_image_complete () {
282 from oe.utils import execute_pre_post_process
283
284 post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")
285
286 execute_pre_post_process(d, post_process_cmds)
287}
288do_image_complete[dirs] = "${TOPDIR}"
289SSTATETASKS += "do_image_complete"
290SSTATE_SKIP_CREATION:task-image-complete = '1'
291do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
292do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
293do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
294addtask do_image_complete after do_image before do_build
295python do_image_complete_setscene () {
296 sstate_setscene(d)
297}
298addtask do_image_complete_setscene
299
300# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
301#
302# IMAGE_QA_COMMANDS += " \
303# image_check_everything_ok \
304# "
305# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
306# construction has completed in order to validate the resulting image.
307#
308# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
309# directory, which if QA passes will be the basis for the images.
310fakeroot python do_image_qa () {
311 from oe.utils import ImageQAFailed
312
313 qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
314 qamsg = ""
315
316 for cmd in qa_cmds:
317 try:
318 bb.build.exec_func(cmd, d)
319 except oe.utils.ImageQAFailed as e:
320 qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
321 except Exception as e:
322 qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
323
324 if qamsg:
325 imgname = d.getVar('IMAGE_NAME')
326 bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
327}
328addtask do_image_qa after do_rootfs before do_image
329
330SSTATETASKS += "do_image_qa"
331SSTATE_SKIP_CREATION:task-image-qa = '1'
332do_image_qa[sstate-inputdirs] = ""
333do_image_qa[sstate-outputdirs] = ""
334python do_image_qa_setscene () {
335 sstate_setscene(d)
336}
337addtask do_image_qa_setscene
338
339def setup_debugfs_variables(d):
340 d.appendVar('IMAGE_ROOTFS', '-dbg')
341 if d.getVar('IMAGE_LINK_NAME'):
342 d.appendVar('IMAGE_LINK_NAME', '-dbg')
343 d.appendVar('IMAGE_NAME','-dbg')
344 d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
345 debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
346 if debugfs_image_fstypes:
347 d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
348
349python setup_debugfs () {
350 setup_debugfs_variables(d)
351}
352
353python () {
354 vardeps = set()
355 # We allow CONVERSIONTYPES to have duplicates. That avoids breaking
356 # derived distros when OE-core or some other layer independently adds
357 # the same type. There is still only one command for each type, but
358 # presumably the commands will do the same when the type is the same,
359 # even when added in different places.
360 #
361 # Without de-duplication, gen_conversion_cmds() below
362 # would create the same compression command multiple times.
363 ctypes = set(d.getVar('CONVERSIONTYPES').split())
364 old_overrides = d.getVar('OVERRIDES', False)
365
366 def _image_base_type(type):
367 basetype = type
368 for ctype in ctypes:
369 if type.endswith("." + ctype):
370 basetype = type[:-len("." + ctype)]
371 break
372
373 if basetype != type:
374 # New base type itself might be generated by a conversion command.
375 basetype = _image_base_type(basetype)
376
377 return basetype
378
379 basetypes = {}
380 alltypes = d.getVar('IMAGE_FSTYPES').split()
381 typedeps = {}
382
383 if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
384 debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
385 for t in debugfs_fstypes:
386 alltypes.append("debugfs_" + t)
387
388 def _add_type(t):
389 baset = _image_base_type(t)
390 input_t = t
391 if baset not in basetypes:
392 basetypes[baset]= []
393 if t not in basetypes[baset]:
394 basetypes[baset].append(t)
395 debug = ""
396 if t.startswith("debugfs_"):
397 t = t[8:]
398 debug = "debugfs_"
399 deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split()
400 vardeps.add('IMAGE_TYPEDEP:' + t)
401 if baset not in typedeps:
402 typedeps[baset] = set()
403 deps = [debug + dep for dep in deps]
404 for dep in deps:
405 if dep not in alltypes:
406 alltypes.append(dep)
407 _add_type(dep)
408 basedep = _image_base_type(dep)
409 typedeps[baset].add(basedep)
410
411 if baset != input_t:
412 _add_type(baset)
413
414 for t in alltypes[:]:
415 _add_type(t)
416
417 d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
418
419 maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
420 maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]
421
422 for t in basetypes:
423 vardeps = set()
424 cmds = []
425 subimages = []
426 realt = t
427
428 if t in maskedtypes:
429 continue
430
431 localdata = bb.data.createCopy(d)
432 debug = ""
433 if t.startswith("debugfs_"):
434 setup_debugfs_variables(localdata)
435 debug = "setup_debugfs "
436 realt = t[8:]
437 localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
438 localdata.setVar('type', realt)
439 # Delete DATETIME so we don't expand any references to it now
440 # This means the task's hash can be stable rather than having hardcoded
441 # date/time values. It will get expanded at execution time.
442    # Similarly TMPDIR since otherwise we see QA stamp comparison problems
443 # Expand PV else it can trigger get_srcrev which can fail due to these variables being unset
444 localdata.setVar('PV', d.getVar('PV'))
445 localdata.delVar('DATETIME')
446 localdata.delVar('DATE')
447 localdata.delVar('TMPDIR')
448 localdata.delVar('IMAGE_VERSION_SUFFIX')
449 vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
450 for dep in vardepsexclude:
451 localdata.delVar(dep)
452
453 image_cmd = localdata.getVar("IMAGE_CMD")
454 vardeps.add('IMAGE_CMD:' + realt)
455 if image_cmd:
456 cmds.append("\t" + image_cmd)
457 else:
458 bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
459 cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
460
461 # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx,
462 # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function.
463 d.delVarFlag('IMAGE_CMD:' + realt, 'func')
464
465 rm_tmp_images = set()
466 def gen_conversion_cmds(bt):
467 for ctype in sorted(ctypes):
468 if bt.endswith("." + ctype):
469 type = bt[0:-len(ctype) - 1]
470 if type.startswith("debugfs_"):
471 type = type[8:]
472 # Create input image first.
473 gen_conversion_cmds(type)
474 localdata.setVar('type', type)
475 cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype)
476 if cmd not in cmds:
477 cmds.append(cmd)
478 vardeps.add('CONVERSION_CMD:' + ctype)
479 subimage = type + "." + ctype
480 if subimage not in subimages:
481 subimages.append(subimage)
482 if type not in alltypes:
483 rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
484
485 for bt in basetypes[t]:
486 gen_conversion_cmds(bt)
487
488 localdata.setVar('type', realt)
489 if t not in alltypes:
490 rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
491 else:
492 subimages.append(realt)
493
494 # Clean up after applying all conversion commands. Some of them might
495    # use the same input; therefore we cannot delete sooner without applying
496 # some complex dependency analysis.
497 for image in sorted(rm_tmp_images):
498 cmds.append("\trm " + image)
499
500 after = 'do_image'
501 for dep in typedeps[t]:
502 after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
503
504 task = "do_image_%s" % t.replace("-", "_").replace(".", "_")
505
506 d.setVar(task, '\n'.join(cmds))
507 d.setVarFlag(task, 'func', '1')
508 d.setVarFlag(task, 'fakeroot', '1')
509
510 d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
511 d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
512 d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
513 d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
514 d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))
515
516 bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
517 bb.build.addtask(task, 'do_image_complete', after, d)
518}
519
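To make the machinery above concrete, here is a standalone sketch (plain Python, not part of the class) of the base-type resolution and task naming it performs for a composite type:

    # Sketch of _image_base_type() and the task-name derivation, assuming a
    # subset of the stock CONVERSIONTYPES
    ctypes = {"gz", "u-boot", "sha256sum"}

    def image_base_type(t):
        for ctype in ctypes:
            if t.endswith("." + ctype):
                # Strip one conversion suffix, then recurse: the remainder may
                # itself be a converted type (e.g. ext4.gz inside ext4.gz.u-boot)
                return image_base_type(t[:-len("." + ctype)])
        return t

    assert image_base_type("ext4.gz.u-boot") == "ext4"
    print("do_image_%s" % "ext4.gz.u-boot".replace("-", "_").replace(".", "_"))
    # -> do_image_ext4_gz_u_boot
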
520#
521# Compute the rootfs size
522#
523def get_rootfs_size(d):
524 import subprocess, oe.utils
525
526 rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
527 overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
528 rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
529 rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
530 rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
531 image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
532 initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
533 initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
534
535 size_kb = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS")) / 1024
536
537 base_size = size_kb * overhead_factor
538 bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
539 base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
540 bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
541
542 base_size = base_size2
543 if base_size != int(base_size):
544 base_size = int(base_size + 1)
545 else:
546 base_size = int(base_size)
547 bb.debug(1, '%f = int(%f)' % (base_size, base_size2))
548
549 base_size_saved = base_size
550 base_size += rootfs_alignment - 1
551 base_size -= base_size % rootfs_alignment
552 bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved))
553
554    # Do not check the image size of the debugfs image. It is not supposed
555    # to be deployed, so it doesn't make sense to limit the
556    # size of the debugfs.
557 if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
558 bb.debug(1, 'returning debugfs size %d' % (base_size))
559 return base_size
560
561 # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
562 if rootfs_maxsize:
563 rootfs_maxsize_int = int(rootfs_maxsize)
564 if base_size > rootfs_maxsize_int:
565 bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
566 (base_size, rootfs_maxsize_int))
567
568 # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
569 if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
570 initramfs_maxsize_int = int(initramfs_maxsize)
571 if base_size > initramfs_maxsize_int:
572 bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
573 (base_size, initramfs_maxsize_int))
574            bb.error("You can set INITRAMFS_MAXSIZE to a larger value. Usually, it should")
575            bb.fatal("be less than 1/2 of the RAM size, or the image may fail to boot.\n")
576
577 bb.debug(1, 'returning %d' % (base_size))
578 return base_size
579
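A worked example of the computation above, with made-up numbers: a measured rootfs of 120000K, IMAGE_OVERHEAD_FACTOR 1.3, IMAGE_ROOTFS_SIZE 8192, IMAGE_ROOTFS_EXTRA_SPACE 0 and IMAGE_ROOTFS_ALIGNMENT 4096:

    size_kb = 120000                       # hypothetical du of IMAGE_ROOTFS, in K
    base_size = size_kb * 1.3              # overhead factor applied -> 156000.0
    base_size = max(base_size, 8192) + 0   # rootfs size floor plus extra space
    # Round up to an integer, as the function above does
    base_size = int(base_size + 1) if base_size != int(base_size) else int(base_size)
    alignment = 4096                       # IMAGE_ROOTFS_ALIGNMENT, in K
    base_size += alignment - 1             # 160095
    base_size -= base_size % alignment     # next 4096K multiple >= 156000
    print(base_size)                       # -> 159744
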
580python set_image_size () {
581 rootfs_size = get_rootfs_size(d)
582 d.setVar('ROOTFS_SIZE', str(rootfs_size))
583 d.setVarFlag('ROOTFS_SIZE', 'export', '1')
584}
585
586#
587# Create symlinks to the newly created image
588#
589python create_symlinks() {
590
591 deploy_dir = d.getVar('IMGDEPLOYDIR')
592 img_name = d.getVar('IMAGE_NAME')
593 link_name = d.getVar('IMAGE_LINK_NAME')
594 manifest_name = d.getVar('IMAGE_MANIFEST')
595 taskname = d.getVar("BB_CURRENTTASK")
596 subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
597 imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")
598
599 if not link_name:
600 return
601 for type in subimages:
602 dst = os.path.join(deploy_dir, link_name + "." + type)
603 src = img_name + imgsuffix + type
604 if os.path.exists(os.path.join(deploy_dir, src)):
605 bb.note("Creating symlink: %s -> %s" % (dst, src))
606 if os.path.islink(dst):
607 os.remove(dst)
608 os.symlink(src, dst)
609 else:
610 bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
611}
612
613MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
614MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
615MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
616
617do_fetch[noexec] = "1"
618do_unpack[noexec] = "1"
619do_patch[noexec] = "1"
620do_configure[noexec] = "1"
621do_compile[noexec] = "1"
622do_install[noexec] = "1"
623deltask do_populate_lic
624deltask do_populate_sysroot
625do_package[noexec] = "1"
626deltask do_package_qa
627deltask do_packagedata
628deltask do_package_write_ipk
629deltask do_package_write_deb
630deltask do_package_write_rpm
631
632# Prepare the root links to point to the /usr counterparts.
633create_merged_usr_symlinks() {
634 root="$1"
635 install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
636 ln -rs $root${base_bindir} $root/bin
637 ln -rs $root${base_sbindir} $root/sbin
638 ln -rs $root${base_libdir} $root/${baselib}
639
640 if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
641 install -d $root${nonarch_base_libdir}
642 ln -rs $root${nonarch_base_libdir} $root/lib
643 fi
644
645 # create base links for multilibs
646 multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
647 for d in $multi_libdirs; do
648 install -d $root${exec_prefix}/$d
649 ln -rs $root${exec_prefix}/$d $root/$d
650 done
651}
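
With the usual usrmerge settings (base_bindir = /usr/bin, base_sbindir = /usr/sbin, base_libdir = /usr/lib), the relative links created above come out as, for example:

    bin  -> usr/bin
    sbin -> usr/sbin
    lib  -> usr/lib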
652
653create_merged_usr_symlinks_rootfs() {
654 create_merged_usr_symlinks ${IMAGE_ROOTFS}
655}
656
657create_merged_usr_symlinks_sdk() {
658 create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
659}
660
661ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
662POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
663
664reproducible_final_image_task () {
665 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
666 REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
667 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
668 REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
669 fi
670 fi
671 # Set mtime of all files to a reproducible value
672 bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
673 find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
674}
675
676systemd_preset_all () {
677 if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
678 systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
679 fi
680}
681
682IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
683
684CVE_PRODUCT = ""
diff --git a/meta/classes-recipe/image_types.bbclass b/meta/classes-recipe/image_types.bbclass
new file mode 100644
index 0000000000..a731e585b2
--- /dev/null
+++ b/meta/classes-recipe/image_types.bbclass
@@ -0,0 +1,355 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The default alignment of the size of the rootfs is set to 1KiB. In case
8# you're using the SD card emulation of a QEMU system simulator you may
9# set this value to 2048 (2MiB alignment).
10IMAGE_ROOTFS_ALIGNMENT ?= "1"
11
12def imagetypes_getdepends(d):
13 def adddep(depstr, deps):
14 for d in (depstr or "").split():
15 # Add task dependency if not already present
16 if ":" not in d:
17 d += ":do_populate_sysroot"
18 deps.add(d)
19
20 # Take a type in the form of foo.bar.car and split it into the items
21 # needed for the image deps "foo", and the conversion deps ["bar", "car"]
22 def split_types(typestring):
23 types = typestring.split(".")
24 return types[0], types[1:]
25
26 fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
27 fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())
28
29 deprecated = set()
30 deps = set()
31 for typestring in fstypes:
32 basetype, resttypes = split_types(typestring)
33
34 var = "IMAGE_DEPENDS_%s" % basetype
35 if d.getVar(var) is not None:
36 deprecated.add(var)
37
38 for typedepends in (d.getVar("IMAGE_TYPEDEP:%s" % basetype) or "").split():
39 base, rest = split_types(typedepends)
40 resttypes += rest
41
42 var = "IMAGE_DEPENDS_%s" % base
43 if d.getVar(var) is not None:
44 deprecated.add(var)
45
46 for ctype in resttypes:
47 adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
48 adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)
49
50 if deprecated:
51 bb.fatal('Deprecated variable(s) found: "%s". '
52 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated))
53
54    # Sort the set so that ordering is consistent
55 return " ".join(sorted(deps))
56
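For instance, a standalone restatement of split_types() above:

    def split_types(typestring):
        # "foo.bar.car" -> image type "foo" plus conversion chain ["bar", "car"]
        types = typestring.split(".")
        return types[0], types[1:]

    assert split_types("ext4.gz.sha256sum") == ("ext4", ["gz", "sha256sum"])
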
57XZ_COMPRESSION_LEVEL ?= "-9"
58XZ_INTEGRITY_CHECK ?= "crc32"
59
60ZIP_COMPRESSION_LEVEL ?= "-9"
61
62ZSTD_COMPRESSION_LEVEL ?= "-3"
63
64JFFS2_SUM_EXTRA_ARGS ?= ""
65IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
66
67IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
68
69oe_mkext234fs () {
70 fstype=$1
71 extra_imagecmd=""
72
73 if [ $# -gt 1 ]; then
74 shift
75 extra_imagecmd=$@
76 fi
77
78    # If generating an empty image, the size of the sparse block should be large
79    # enough to allocate an ext4 filesystem using 4096 bytes per inode; this is
80    # about 60K, so dd needs a minimum count of 60, with bs=1024 (bytes per IO)
81 eval local COUNT=\"0\"
82 eval local MIN_COUNT=\"60\"
83 if [ $ROOTFS_SIZE -lt $MIN_COUNT ]; then
84 eval COUNT=\"$MIN_COUNT\"
85 fi
86 # Create a sparse image block
87 bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
88 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
89 bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
90 bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
91 bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
92 mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
93    # Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
94 fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
95}
96
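The dd invocation above relies on seek= to create a sparse file: the apparent size is (seek + count) * 1024 bytes, but almost no disk blocks are allocated. A rough Python equivalent, with hypothetical sizes:

    # Sketch only: sparse file equivalent to ROOTFS_SIZE=159744K, COUNT=0
    with open("/tmp/sparse-demo.ext4", "wb") as f:
        # Sets the apparent size; blocks stay unallocated (sparse) on most
        # filesystems, just like dd with seek= and count=0
        f.truncate((159744 + 0) * 1024)
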
97IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
98IMAGE_CMD:ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
99IMAGE_CMD:ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
100
101MIN_BTRFS_SIZE ?= "16384"
102IMAGE_CMD:btrfs () {
103 size=${ROOTFS_SIZE}
104 if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
105 size=${MIN_BTRFS_SIZE}
106 bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
107 fi
108 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs seek=${size} count=0 bs=1024
109 mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
110}
111
112IMAGE_CMD:squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
113IMAGE_CMD:squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
114IMAGE_CMD:squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
115IMAGE_CMD:squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
116IMAGE_CMD:squashfs-zst = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-zst ${EXTRA_IMAGECMD} -noappend -comp zstd"
117
118IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs ${IMAGE_ROOTFS}"
119IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4 ${IMAGE_ROOTFS}"
120IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4hc ${IMAGE_ROOTFS}"
121
122
123IMAGE_CMD_TAR ?= "tar"
124# Ignore return code 1 ("file changed as we read it") as other tasks (e.g. do_image_wic) may be hardlinking the rootfs
125IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
126
127do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
128IMAGE_CMD:cpio () {
129 (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
130 # We only need the /init symlink if we're building the real
131 # image. The -dbg image doesn't need it! By being clever
132 # about this we also avoid 'touch' below failing, as it
133 # might be trying to touch /sbin/init on the host since both
134 # the normal and the -dbg image share the same WORKDIR
135 if [ "${IMAGE_BUILDING_DEBUGFS}" != "true" ]; then
136 if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
137 if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
138 ln -sf /sbin/init ${WORKDIR}/cpio_append/init
139 else
140 touch ${WORKDIR}/cpio_append/init
141 fi
142 (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
143 fi
144 fi
145}
146
147UBI_VOLNAME ?= "${MACHINE}-rootfs"
148UBI_VOLTYPE ?= "dynamic"
149UBI_IMGTYPE ?= "ubifs"
150
151write_ubi_config() {
152 if [ -z "$1" ]; then
153 local vname=""
154 else
155 local vname="_$1"
156 fi
157
158 cat <<EOF > ubinize${vname}-${IMAGE_NAME}.cfg
159[ubifs]
160mode=ubi
161image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE}
162vol_id=0
163vol_type=${UBI_VOLTYPE}
164vol_name=${UBI_VOLNAME}
165vol_flags=autoresize
166EOF
167}
168
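With the default UBI settings, the generated cfg file looks roughly like this (the image path and machine name are hypothetical):

    [ubifs]
    mode=ubi
    image=/path/to/deploy/core-image-minimal-qemuarm.rootfs.ubifs
    vol_id=0
    vol_type=dynamic
    vol_name=qemuarm-rootfs
    vol_flags=autoresize
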
169multiubi_mkfs() {
170 local mkubifs_args="$1"
171 local ubinize_args="$2"
172
173    # Give a clear error message for ubi and ubifs image creation.
174 if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
175 bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
176 fi
177
178 write_ubi_config "$3"
179
180 if [ -n "$vname" ]; then
181 mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
182 fi
183 ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
184
185 # Cleanup cfg file
186 mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/
187
188 # Create own symlinks for 'named' volumes
189 if [ -n "$vname" ]; then
190 cd ${IMGDEPLOYDIR}
191 if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
192 ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
193 ${IMAGE_LINK_NAME}${vname}.ubifs
194 fi
195 if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ]; then
196 ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi \
197 ${IMAGE_LINK_NAME}${vname}.ubi
198 fi
199 cd -
200 fi
201}
202
203IMAGE_CMD:multiubi () {
204 # Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
205 for name in ${MULTIUBI_BUILD}; do
206 eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
207 eval local ubinize_args=\"\$UBINIZE_ARGS_${name}\"
208
209 multiubi_mkfs "${mkubifs_args}" "${ubinize_args}" "${name}"
210 done
211}
212
213IMAGE_CMD:ubi () {
214 multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
215}
216IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}"
217
218IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
219
220MIN_F2FS_SIZE ?= "524288"
221IMAGE_CMD:f2fs () {
222    # We need to add additional smarts here for devices smaller than 1.5G.
223    # Need to scale appropriately between 40M -> 1.5G as the "overprovision
224    # ratio" goes down as the device gets bigger (70% -> 4.5%); below about
225    # 500M the standard IMAGE_OVERHEAD_FACTOR does not work, so add additional
226    # space here when under 500M
227 size=${ROOTFS_SIZE}
228 if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then
229 size=${MIN_F2FS_SIZE}
230 bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
231 fi
232 dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs seek=${size} count=0 bs=1024
233 mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
234 sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
235}
236
237EXTRA_IMAGECMD = ""
238
239inherit siteinfo kernel-arch image-artifact-names
240
241JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
242JFFS2_ERASEBLOCK ?= "0x40000"
243EXTRA_IMAGECMD:jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
244
245# Change these if you want the default mkfs behavior (i.e. a minimal number of inodes)
246EXTRA_IMAGECMD:ext2 ?= "-i 4096"
247EXTRA_IMAGECMD:ext3 ?= "-i 4096"
248EXTRA_IMAGECMD:ext4 ?= "-i 4096"
249EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink"
250EXTRA_IMAGECMD:f2fs ?= ""
251
252do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
253do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
254do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
255do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot"
256do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot"
257do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot"
258do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot"
259do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
260do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
261do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
262do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
263do_image_squashfs_zst[depends] += "squashfs-tools-native:do_populate_sysroot"
264do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
265do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
266do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
267do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
268do_image_erofs[depends] += "erofs-utils-native:do_populate_sysroot"
269do_image_erofs_lz4[depends] += "erofs-utils-native:do_populate_sysroot"
270do_image_erofs_lz4hc[depends] += "erofs-utils-native:do_populate_sysroot"
271
272# This variable lists the values that are suitable for IMAGE_FSTYPES
273IMAGE_TYPES = " \
274 jffs2 jffs2.sum \
275 cramfs \
276 ext2 ext2.gz ext2.bz2 ext2.lzma \
277 ext3 ext3.gz \
278 ext4 ext4.gz \
279 btrfs \
280 squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \
281 ubi ubifs multiubi \
282 tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
283 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 cpio.zst \
284 wic wic.gz wic.bz2 wic.lzma wic.zst \
285 container \
286 f2fs \
287 erofs erofs-lz4 erofs-lz4hc \
288"
289# These image types are x86 specific as they need syslinux
290IMAGE_TYPES:append:x86 = " hddimg iso"
291IMAGE_TYPES:append:x86-64 = " hddimg iso"
292
293# Compression is a special case of conversion. The old variable
294# names are still supported for backward-compatibility. When defining
295# new compression or conversion commands, use CONVERSIONTYPES and
296# CONVERSION_CMD/DEPENDS.
297COMPRESSIONTYPES ?= ""
298
299CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
300CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
301CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
302CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
303CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
304CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
305CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
306CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
307CONVERSION_CMD:zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
308CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
309CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
310CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
311CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
312CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
313CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
314CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
315CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
316CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
317CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
318CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
319CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
320CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
321CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
322CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
323CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
324CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
325CONVERSION_DEPENDS_lzma = "xz-native"
326CONVERSION_DEPENDS_gz = "pigz-native"
327CONVERSION_DEPENDS_bz2 = "pbzip2-native"
328CONVERSION_DEPENDS_xz = "xz-native"
329CONVERSION_DEPENDS_lz4 = "lz4-native"
330CONVERSION_DEPENDS_lzo = "lzop-native"
331CONVERSION_DEPENDS_zip = "zip-native"
332CONVERSION_DEPENDS_zst = "zstd-native"
333CONVERSION_DEPENDS_sum = "mtd-utils-native"
334CONVERSION_DEPENDS_bmap = "bmap-tools-native"
335CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
336CONVERSION_DEPENDS_vmdk = "qemu-system-native"
337CONVERSION_DEPENDS_vdi = "qemu-system-native"
338CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
339CONVERSION_DEPENDS_base64 = "coreutils-native"
340CONVERSION_DEPENDS_vhdx = "qemu-system-native"
341CONVERSION_DEPENDS_vhd = "qemu-system-native"
342CONVERSION_DEPENDS_zsync = "zsync-curl-native"
343CONVERSION_DEPENDS_gzsync = "zsync-curl-native"
344
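Following the same pattern, a layer can register its own conversion type. A hedged sketch, best placed in a class inherited after image_types so the += lands on top of the stock list (the "xzfast" type name is hypothetical; the xz flags themselves are standard):

    # Hypothetical extra conversion type defined in a layer
    CONVERSIONTYPES += "xzfast"
    CONVERSION_CMD:xzfast = "xz -0 -f -k -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xzfast"
    CONVERSION_DEPENDS_xzfast = "xz-native"
    # IMAGE_FSTYPES can then contain e.g. "ext4.xzfast"
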
345RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
346RUNNABLE_MACHINE_PATTERNS ?= "qemu"
347
348DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
349
350# The IMAGE_TYPES_MASKED variable is used to mask out, from IMAGE_FSTYPES, those
351# images that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
352IMAGE_TYPES_MASKED ?= ""
353
354# bmap requires python3 to be in the PATH
355EXTRANATIVEPATH += "${@'python3-native' if '.bmap' in d.getVar('IMAGE_FSTYPES') else ''}"
diff --git a/meta/classes-recipe/image_types_wic.bbclass b/meta/classes-recipe/image_types_wic.bbclass
new file mode 100644
index 0000000000..c339b9bdfb
--- /dev/null
+++ b/meta/classes-recipe/image_types_wic.bbclass
@@ -0,0 +1,190 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The WICVARS variable defines the list of bitbake variables used in the wic code;
8# variables from this list are written to the <image>.env file
9WICVARS ?= "\
10 APPEND \
11 ASSUME_PROVIDED \
12 BBLAYERS \
13 DEPLOY_DIR_IMAGE \
14 FAKEROOTCMD \
15 HOSTTOOLS_DIR \
16 IMAGE_BASENAME \
17 IMAGE_BOOT_FILES \
18 IMAGE_EFI_BOOT_FILES \
19 IMAGE_LINK_NAME \
20 IMAGE_ROOTFS \
21 IMGDEPLOYDIR \
22 INITRAMFS_FSTYPES \
23 INITRAMFS_IMAGE \
24 INITRAMFS_IMAGE_BUNDLE \
25 INITRAMFS_LINK_NAME \
26 INITRD \
27 INITRD_LIVE \
28 ISODIR \
29 KERNEL_IMAGETYPE \
30 MACHINE \
31 PSEUDO_IGNORE_PATHS \
32 RECIPE_SYSROOT_NATIVE \
33 ROOTFS_SIZE \
34 STAGING_DATADIR \
35 STAGING_DIR \
36 STAGING_DIR_HOST \
37 STAGING_LIBDIR \
38 TARGET_SYS \
39"
40
41inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
42
43WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
44WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
45WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
46WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}"
47
48def wks_search(files, search_path):
49 for f in files:
50 if os.path.isabs(f):
51 if os.path.exists(f):
52 return f
53 else:
54 searched = bb.utils.which(search_path, f)
55 if searched:
56 return searched
57
58WIC_CREATE_EXTRA_ARGS ?= ""
59
60IMAGE_CMD:wic () {
61 out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
62 build_wic="${WORKDIR}/build-wic"
63 tmp_wic="${WORKDIR}/tmp-wic"
64 wks="${WKS_FULL_PATH}"
65 if [ -e "$tmp_wic" ]; then
66 # Ensure we don't have any junk leftover from a previously interrupted
67 # do_image_wic execution
68 rm -rf "$tmp_wic"
69 fi
70 if [ -z "$wks" ]; then
71 bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
72 fi
73 BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
74 mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
75}
76IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
77do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
78
79PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
80
81# Rebuild when the wks file or vars in WICVARS change
82USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
83WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
84do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
85do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
86
87# We ensure all artifacts are deployed (e.g. virtual/bootloader)
88do_image_wic[recrdeptask] += "do_deploy"
89do_image_wic[deptask] += "do_image_complete"
90
91WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
92WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
93# Unified kernel images need objcopy
94WKS_FILE_DEPENDS_DEFAULT += "virtual/${MLPREFIX}${TARGET_PREFIX}binutils"
95WKS_FILE_DEPENDS_BOOTLOADERS = ""
96WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
97WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
98WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi"
99
100WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
101
102DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
103
104python do_write_wks_template () {
105 """Write out expanded template contents to WKS_FULL_PATH."""
106 import re
107
108 template_body = d.getVar('_WKS_TEMPLATE')
109
110 # Remove any remnant variable references left behind by the expansion
111 # due to undefined variables
112 expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
113 while True:
114 new_body = re.sub(expand_var_regexp, '', template_body)
115 if new_body == template_body:
116 break
117 else:
118 template_body = new_body
119
120 wks_file = d.getVar('WKS_FULL_PATH')
121 with open(wks_file, 'w') as f:
122 f.write(template_body)
124 # Copy the finalized wks file to the deploy directory for later use
125 depdir = d.getVar('IMGDEPLOYDIR')
126 basename = d.getVar('IMAGE_BASENAME')
127 bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
128}
129
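The cleanup loop above repeatedly strips unexpanded ${VAR} references left in the template; a standalone sketch of its effect (the input line is hypothetical template text):

    import re

    # Same pattern as in do_write_wks_template() above
    expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
    body = "part / --source rootfs --size ${UNDEFINED_SIZE}"
    while True:
        new_body = expand_var_regexp.sub("", body)
        if new_body == body:
            break
        body = new_body
    print(body)  # -> "part / --source rootfs --size "
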
130do_flush_pseudodb() {
131 ${FAKEROOTENV} ${FAKEROOTCMD} -S
132}
133
134python () {
135 if d.getVar('USING_WIC'):
136 wks_file_u = d.getVar('WKS_FULL_PATH', False)
137 wks_file = d.expand(wks_file_u)
138 base, ext = os.path.splitext(wks_file)
139 if ext == '.in' and os.path.exists(wks_file):
140 wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base))
141 d.setVar('WKS_FULL_PATH', wks_out_file)
142 d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
143 d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
144
145 # We need to re-parse each time the file changes, and bitbake
146 # needs to be told about that explicitly.
147 bb.parse.mark_dependency(d, wks_file)
148
149 try:
150 with open(wks_file, 'r') as f:
151 body = f.read()
152 except (IOError, OSError) as exc:
153 pass
154 else:
155 # Previously, I used expandWithRefs to get the dependency list
156 # and add it to WICVARS, but there's no point re-parsing the
157 # file in process_wks_template as well, so just put it in
158 # a variable and let the metadata deal with the deps.
159 d.setVar('_WKS_TEMPLATE', body)
160 bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
161 bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
162}
163
164#
165# Write environment variables used by wic
166# to tmp/sysroots/<machine>/imgdata/<image>.env
167#
168python do_rootfs_wicenv () {
169 wicvars = d.getVar('WICVARS')
170 if not wicvars:
171 return
172
173 stdir = d.getVar('STAGING_DIR')
174 outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata')
175 bb.utils.mkdirhier(outdir)
176 basename = d.getVar('IMAGE_BASENAME')
177 with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
178 for var in wicvars.split():
179 value = d.getVar(var)
180 if value:
181 envf.write('%s="%s"\n' % (var, value.strip()))
183    # Copy the .env file to the deploy directory for later use with standalone wic
184 depdir = d.getVar('IMGDEPLOYDIR')
185 bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
186}
187addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa
188addtask do_rootfs_wicenv after do_image before do_image_wic
189do_rootfs_wicenv[vardeps] += "${WICVARS}"
190do_rootfs_wicenv[prefuncs] = 'set_image_size'
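
The resulting <image>.env is a simple shell-style key/value file; a hypothetical excerpt:

    # Hypothetical excerpt of <IMAGE_BASENAME>.env
    IMAGE_BASENAME="core-image-minimal"
    MACHINE="qemux86-64"
    ROOTFS_SIZE="159744"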
diff --git a/meta/classes-recipe/kernel-arch.bbclass b/meta/classes-recipe/kernel-arch.bbclass
new file mode 100644
index 0000000000..6f5d3bde6c
--- /dev/null
+++ b/meta/classes-recipe/kernel-arch.bbclass
@@ -0,0 +1,74 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Set the ARCH environment variable for kernel compilation (including
9# modules). The return value must match one of the architecture directories
10# in the kernel source "arch" directory
11#
12
13valid_archs = "alpha cris ia64 \
14 i386 x86 \
15 m68knommu m68k ppc powerpc powerpc64 ppc64 \
16 sparc sparc64 \
17 arm aarch64 \
18 m32r mips \
19 sh sh64 um h8300 \
20 parisc s390 v850 \
21 avr32 blackfin \
22 microblaze \
23 nios2 arc riscv xtensa"
24
25def map_kernel_arch(a, d):
26 import re
27
28 valid_archs = d.getVar('valid_archs').split()
29
30 if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
31 elif re.match('arceb$', a): return 'arc'
32 elif re.match('armeb$', a): return 'arm'
33 elif re.match('aarch64$', a): return 'arm64'
34 elif re.match('aarch64_be$', a): return 'arm64'
35 elif re.match('aarch64_ilp32$', a): return 'arm64'
36 elif re.match('aarch64_be_ilp32$', a): return 'arm64'
37 elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
38 elif re.match('mcf', a): return 'm68k'
39 elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
40 elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
41 elif re.match('sh(3|4)$', a): return 'sh'
42 elif re.match('bfin', a): return 'blackfin'
43 elif re.match('microblazee[bl]', a): return 'microblaze'
44 elif a in valid_archs: return a
45 else:
46 if not d.getVar("TARGET_OS").startswith("linux"):
47 return a
48 bb.error("cannot map '%s' to a linux kernel architecture" % a)
49
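A few sample mappings, as a condensed standalone sketch of the rules above (illustration only; the full function also consults valid_archs and TARGET_OS):

    import re

    def sketch_map(a):
        # Condensed restatement of map_kernel_arch() for illustration only
        if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
        if re.match('aarch64', a): return 'arm64'
        if re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
        if re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
        if re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
        return a

    for arch in ("i686", "x86_64", "aarch64", "mips64el", "riscv64", "powerpc64"):
        print(arch, "->", sketch_map(arch))
    # -> x86, x86, arm64, mips, riscv, powerpc
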
50export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}"
51
52def map_uboot_arch(a, d):
53 import re
54
55 if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
56 elif re.match('i.86$', a): return 'x86'
57 return a
58
59export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}"
60
61# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture-
62# specific options necessary for building the kernel and modules.
63TARGET_CC_KERNEL_ARCH ?= ""
64HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
65TARGET_LD_KERNEL_ARCH ?= ""
66HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
67TARGET_AR_KERNEL_ARCH ?= ""
68HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
69
70KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
71KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
72KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
73TOOLCHAIN = "gcc"
74
diff --git a/meta/classes-recipe/kernel-artifact-names.bbclass b/meta/classes-recipe/kernel-artifact-names.bbclass
new file mode 100644
index 0000000000..311075c68d
--- /dev/null
+++ b/meta/classes-recipe/kernel-artifact-names.bbclass
@@ -0,0 +1,37 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7##################################################################
8# Specific kernel creation info
9# for recipes/bbclasses which need to reuse some of the kernel
10# artifacts, but aren't kernel recipes themselves
11##################################################################
12
13inherit image-artifact-names
14
15KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
16KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
17KERNEL_ARTIFACT_BIN_EXT ?= ".bin"
18
19KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
20KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
21KERNEL_IMAGE_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
22KERNEL_IMAGETYPE_SYMLINK ?= "1"
23
24KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
25KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
26KERNEL_DTB_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
27
28KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
29KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
30KERNEL_FIT_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
31
32MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
33MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
34MODULE_TARBALL_DEPLOY ?= "1"
35
36INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}"
37INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}"
diff --git a/meta/classes-recipe/kernel-devicetree.bbclass b/meta/classes-recipe/kernel-devicetree.bbclass
new file mode 100644
index 0000000000..b2117de805
--- /dev/null
+++ b/meta/classes-recipe/kernel-devicetree.bbclass
@@ -0,0 +1,119 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Support for device tree generation
8python () {
9 if not bb.data.inherits_class('nopackages', d):
10 d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree")
11 if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1':
12 d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
13}
14
15FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
16FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
17
18# Generate kernel+devicetree bundle
19KERNEL_DEVICETREE_BUNDLE ?= "0"
20
21# dtc flags passed via DTC_FLAGS env variable
22KERNEL_DTC_FLAGS ?= ""
23
24normalize_dtb () {
25 dtb="$1"
26 if echo $dtb | grep -q '/dts/'; then
27		bbwarn "$dtb contains the full path to the dts file, but only the dtb name should be used."
28 dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'`
29 fi
30 echo "$dtb"
31}
32
33get_real_dtb_path_in_kernel () {
34 dtb="$1"
35 dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb"
36 if [ ! -e "$dtb_path" ]; then
37 dtb_path="${B}/arch/${ARCH}/boot/$dtb"
38 fi
39 echo "$dtb_path"
40}
41
42do_configure:append() {
43 if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
44 if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
45 case "${ARCH}" in
46 "arm")
47 config="${B}/.config"
48 if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then
49 bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!'
50 sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config
51 echo "CONFIG_ARM_APPENDED_DTB=y" >> $config
52 echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config
53 fi
54 ;;
55 *)
56 bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'."
57 esac
58 else
59			bberror 'KERNEL_DEVICETREE_BUNDLE requires KERNEL_IMAGETYPE to contain zImage.'
60 fi
61 fi
62}
63
64do_compile:append() {
65 if [ -n "${KERNEL_DTC_FLAGS}" ]; then
66 export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
67 fi
68
69 for dtbf in ${KERNEL_DEVICETREE}; do
70 dtb=`normalize_dtb "$dtbf"`
71 oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
72 done
73}
74
75do_install:append() {
76 for dtbf in ${KERNEL_DEVICETREE}; do
77 dtb=`normalize_dtb "$dtbf"`
78 dtb_ext=${dtb##*.}
79 dtb_base_name=`basename $dtb .$dtb_ext`
80 dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
81 install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
82 done
83}
84
85do_deploy:append() {
86 for dtbf in ${KERNEL_DEVICETREE}; do
87 dtb=`normalize_dtb "$dtbf"`
88 dtb_ext=${dtb##*.}
89 dtb_base_name=`basename $dtb .$dtb_ext`
90 install -d $deployDir
91 install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
92 if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
93 ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
94 fi
95 if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then
96 ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
97 fi
98 for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
99 if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
100 cat ${D}/${KERNEL_IMAGEDEST}/$type \
101 $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
102 > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
103 if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
104 ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
105 $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
106 fi
107 if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
108 cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
109 $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
110 > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
111 if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
112 ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
113 $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
114 fi
115 fi
116 fi
117 done
118 done
119}
diff --git a/meta/classes-recipe/kernel-fitimage.bbclass b/meta/classes-recipe/kernel-fitimage.bbclass
new file mode 100644
index 0000000000..838ce204cb
--- /dev/null
+++ b/meta/classes-recipe/kernel-fitimage.bbclass
@@ -0,0 +1,803 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-uboot kernel-artifact-names uboot-sign
8
9def get_fit_replacement_type(d):
10 kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
11 replacementtype = ""
12 if 'fitImage' in kerneltypes.split():
13 uarch = d.getVar("UBOOT_ARCH")
14 if uarch == "arm64":
15 replacementtype = "Image"
16 elif uarch == "riscv":
17 replacementtype = "Image"
18 elif uarch == "mips":
19 replacementtype = "vmlinuz.bin"
20 elif uarch == "x86":
21 replacementtype = "bzImage"
22 elif uarch == "microblaze":
23 replacementtype = "linux.bin"
24 else:
25 replacementtype = "zImage"
26 return replacementtype
27
28KERNEL_IMAGETYPE_REPLACEMENT ?= "${@get_fit_replacement_type(d)}"
29DEPENDS:append = " ${@'u-boot-tools-native dtc-native' if 'fitImage' in (d.getVar('KERNEL_IMAGETYPES') or '').split() else ''}"
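As a concrete reading of the helper above (values assumed for illustration): with

    KERNEL_IMAGETYPES = "fitImage"
    UBOOT_ARCH = "arm64"

KERNEL_IMAGETYPE_REPLACEMENT evaluates to "Image", so make builds Image and the fitImage is assembled around it by the tasks below.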
30
31python __anonymous () {
32	# Override the KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
33	# to kernel.bbclass. We have to override it, since we pack zImage
34	# (at least for now) into the fitImage.
35 typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
36 if 'fitImage' in typeformake.split():
37 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', d.getVar('KERNEL_IMAGETYPE_REPLACEMENT')))
38
39 image = d.getVar('INITRAMFS_IMAGE')
40 if image:
41 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
42
43 ubootenv = d.getVar('UBOOT_ENV')
44 if ubootenv:
45 d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/bootloader:do_populate_sysroot')
46
47	# check if there are any dtb providers
48 providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
49 if providerdtb:
50 d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
51 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
52 d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
53
54	# Verified boot will sign the fitImage and append the public key to
55	# the U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
56 # the fitImage:
57 if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
58 uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
59 d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
60 if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
61 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
62}
63
64
65# Description string
66FIT_DESC ?= "Kernel fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
67
68# Sign individual images as well
69FIT_SIGN_INDIVIDUAL ?= "0"
70
71FIT_CONF_PREFIX ?= "conf-"
72FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
73
74FIT_SUPPORTED_INITRAMFS_FSTYPES ?= "cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio"
75
76# Keys used to sign image nodes individually.
77# The keys used to sign image nodes must be different from those used to sign
78# configuration nodes, otherwise the "required" property, from
79# UBOOT_DTB_BINARY, will be set to "conf", because "conf" prevails over "image".
80# Image signature checking would then no longer be mandatory, and no error
81# would be raised in case of failure.
82# UBOOT_SIGN_IMG_KEYNAME = "dev2" # keys name in keydir (eg. "dev2.crt", "dev2.key")
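A sketch of a complete signing setup in local.conf — the key directory and key names are placeholders, and fitimage_assemble below rejects identical configuration and image key names:

    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "/path/to/keys"
    UBOOT_SIGN_KEYNAME = "dev"        # signs configuration nodes
    UBOOT_SIGN_IMG_KEYNAME = "dev2"   # signs image nodes
    FIT_SIGN_INDIVIDUAL = "1"
    FIT_GENERATE_KEYS = "1"           # have do_kernel_generate_rsa_keys create missing keys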
83
84#
85# Emit the fitImage ITS header
86#
87# $1 ... .its filename
88fitimage_emit_fit_header() {
89 cat << EOF >> $1
90/dts-v1/;
91
92/ {
93 description = "${FIT_DESC}";
94 #address-cells = <1>;
95EOF
96}
97
98#
99# Emit the fitImage section bits
100#
101# $1 ... .its filename
102# $2 ... Section bit type: imagestart - image section start
103# confstart - configuration section start
104# sectend - section end
105# fitend - fitimage end
106#
107fitimage_emit_section_maint() {
108 case $2 in
109 imagestart)
110 cat << EOF >> $1
111
112 images {
113EOF
114 ;;
115 confstart)
116 cat << EOF >> $1
117
118 configurations {
119EOF
120 ;;
121 sectend)
122 cat << EOF >> $1
123 };
124EOF
125 ;;
126 fitend)
127 cat << EOF >> $1
128};
129EOF
130 ;;
131 esac
132}
133
134#
135# Emit the fitImage ITS kernel section
136#
137# $1 ... .its filename
138# $2 ... Image counter
139# $3 ... Path to kernel image
140# $4 ... Compression type
141fitimage_emit_section_kernel() {
142
143 kernel_csum="${FIT_HASH_ALG}"
144 kernel_sign_algo="${FIT_SIGN_ALG}"
145 kernel_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
146
147 ENTRYPOINT="${UBOOT_ENTRYPOINT}"
148 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
149 ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
150 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
151 fi
152
153 cat << EOF >> $1
154 kernel-$2 {
155 description = "Linux kernel";
156 data = /incbin/("$3");
157 type = "${UBOOT_MKIMAGE_KERNEL_TYPE}";
158 arch = "${UBOOT_ARCH}";
159 os = "linux";
160 compression = "$4";
161 load = <${UBOOT_LOADADDRESS}>;
162 entry = <$ENTRYPOINT>;
163 hash-1 {
164 algo = "$kernel_csum";
165 };
166 };
167EOF
168
169 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$kernel_sign_keyname" ] ; then
170 sed -i '$ d' $1
171 cat << EOF >> $1
172 signature-1 {
173 algo = "$kernel_csum,$kernel_sign_algo";
174 key-name-hint = "$kernel_sign_keyname";
175 };
176 };
177EOF
178 fi
179}
180
181#
182# Emit the fitImage ITS DTB section
183#
184# $1 ... .its filename
185# $2 ... Image counter
186# $3 ... Path to DTB image
187fitimage_emit_section_dtb() {
188
189 dtb_csum="${FIT_HASH_ALG}"
190 dtb_sign_algo="${FIT_SIGN_ALG}"
191 dtb_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
192
193 dtb_loadline=""
194 dtb_ext=${DTB##*.}
195 if [ "${dtb_ext}" = "dtbo" ]; then
196 if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
197 dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
198 fi
199 elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
200 dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
201 fi
202 cat << EOF >> $1
203 fdt-$2 {
204 description = "Flattened Device Tree blob";
205 data = /incbin/("$3");
206 type = "flat_dt";
207 arch = "${UBOOT_ARCH}";
208 compression = "none";
209 $dtb_loadline
210 hash-1 {
211 algo = "$dtb_csum";
212 };
213 };
214EOF
215
216 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$dtb_sign_keyname" ] ; then
217 sed -i '$ d' $1
218 cat << EOF >> $1
219 signature-1 {
220 algo = "$dtb_csum,$dtb_sign_algo";
221 key-name-hint = "$dtb_sign_keyname";
222 };
223 };
224EOF
225 fi
226}
227
228#
229# Emit the fitImage ITS u-boot script section
230#
231# $1 ... .its filename
232# $2 ... Image counter
233# $3 ... Path to boot script image
234fitimage_emit_section_boot_script() {
235
236 bootscr_csum="${FIT_HASH_ALG}"
237 bootscr_sign_algo="${FIT_SIGN_ALG}"
238 bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
239
240 cat << EOF >> $1
241 bootscr-$2 {
242 description = "U-boot script";
243 data = /incbin/("$3");
244 type = "script";
245 arch = "${UBOOT_ARCH}";
246 compression = "none";
247 hash-1 {
248 algo = "$bootscr_csum";
249 };
250 };
251EOF
252
253 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
254 sed -i '$ d' $1
255 cat << EOF >> $1
256 signature-1 {
257 algo = "$bootscr_csum,$bootscr_sign_algo";
258 key-name-hint = "$bootscr_sign_keyname";
259 };
260 };
261EOF
262 fi
263}
264
265#
266# Emit the fitImage ITS setup section
267#
268# $1 ... .its filename
269# $2 ... Image counter
270# $3 ... Path to setup image
271fitimage_emit_section_setup() {
272
273 setup_csum="${FIT_HASH_ALG}"
274
275 cat << EOF >> $1
276 setup-$2 {
277 description = "Linux setup.bin";
278 data = /incbin/("$3");
279 type = "x86_setup";
280 arch = "${UBOOT_ARCH}";
281 os = "linux";
282 compression = "none";
283 load = <0x00090000>;
284 entry = <0x00090000>;
285 hash-1 {
286 algo = "$setup_csum";
287 };
288 };
289EOF
290}
291
292#
293# Emit the fitImage ITS ramdisk section
294#
295# $1 ... .its filename
296# $2 ... Image counter
297# $3 ... Path to ramdisk image
298fitimage_emit_section_ramdisk() {
299
300 ramdisk_csum="${FIT_HASH_ALG}"
301 ramdisk_sign_algo="${FIT_SIGN_ALG}"
302 ramdisk_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
303 ramdisk_loadline=""
304 ramdisk_entryline=""
305
306 if [ -n "${UBOOT_RD_LOADADDRESS}" ]; then
307 ramdisk_loadline="load = <${UBOOT_RD_LOADADDRESS}>;"
308 fi
309 if [ -n "${UBOOT_RD_ENTRYPOINT}" ]; then
310 ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
311 fi
312
313 cat << EOF >> $1
314 ramdisk-$2 {
315 description = "${INITRAMFS_IMAGE}";
316 data = /incbin/("$3");
317 type = "ramdisk";
318 arch = "${UBOOT_ARCH}";
319 os = "linux";
320 compression = "none";
321 $ramdisk_loadline
322 $ramdisk_entryline
323 hash-1 {
324 algo = "$ramdisk_csum";
325 };
326 };
327EOF
328
329 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$ramdisk_sign_keyname" ] ; then
330 sed -i '$ d' $1
331 cat << EOF >> $1
332 signature-1 {
333 algo = "$ramdisk_csum,$ramdisk_sign_algo";
334 key-name-hint = "$ramdisk_sign_keyname";
335 };
336 };
337EOF
338 fi
339}
340
341#
342# Emit the fitImage ITS configuration section
343#
344# $1 ... .its filename
345# $2 ... Linux kernel ID
346# $3 ... DTB image name
347# $4 ... ramdisk ID
348# $5 ... u-boot script ID
349# $6 ... config ID
350# $7 ... default flag
351fitimage_emit_section_config() {
352
353 conf_csum="${FIT_HASH_ALG}"
354 conf_sign_algo="${FIT_SIGN_ALG}"
355 conf_padding_algo="${FIT_PAD_ALG}"
356 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
357 conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
358 fi
359
360 its_file="$1"
361 kernel_id="$2"
362 dtb_image="$3"
363 ramdisk_id="$4"
364 bootscr_id="$5"
365 config_id="$6"
366 default_flag="$7"
367
368 # Test if we have any DTBs at all
369 sep=""
370 conf_desc=""
371 conf_node="${FIT_CONF_PREFIX}"
372 kernel_line=""
373 fdt_line=""
374 ramdisk_line=""
375 bootscr_line=""
376 setup_line=""
377 default_line=""
378
379 # conf node name is selected based on dtb ID if it is present,
380	# otherwise it is selected based on kernel ID
381 if [ -n "$dtb_image" ]; then
382 conf_node=$conf_node$dtb_image
383 else
384 conf_node=$conf_node$kernel_id
385 fi
386
387 if [ -n "$kernel_id" ]; then
388 conf_desc="Linux kernel"
389 sep=", "
390 kernel_line="kernel = \"kernel-$kernel_id\";"
391 fi
392
393 if [ -n "$dtb_image" ]; then
394 conf_desc="$conf_desc${sep}FDT blob"
395 sep=", "
396 fdt_line="fdt = \"fdt-$dtb_image\";"
397 fi
398
399 if [ -n "$ramdisk_id" ]; then
400 conf_desc="$conf_desc${sep}ramdisk"
401 sep=", "
402 ramdisk_line="ramdisk = \"ramdisk-$ramdisk_id\";"
403 fi
404
405 if [ -n "$bootscr_id" ]; then
406 conf_desc="$conf_desc${sep}u-boot script"
407 sep=", "
408 bootscr_line="bootscr = \"bootscr-$bootscr_id\";"
409 fi
410
411 if [ -n "$config_id" ]; then
412 conf_desc="$conf_desc${sep}setup"
413 setup_line="setup = \"setup-$config_id\";"
414 fi
415
416 if [ "$default_flag" = "1" ]; then
417 # default node is selected based on dtb ID if it is present,
418		# otherwise it is selected based on kernel ID
419 if [ -n "$dtb_image" ]; then
420 default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
421 else
422 default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
423 fi
424 fi
425
426 cat << EOF >> $its_file
427 $default_line
428 $conf_node {
429 description = "$default_flag $conf_desc";
430 $kernel_line
431 $fdt_line
432 $ramdisk_line
433 $bootscr_line
434 $setup_line
435 hash-1 {
436 algo = "$conf_csum";
437 };
438EOF
439
440 if [ -n "$conf_sign_keyname" ] ; then
441
442 sign_line="sign-images = "
443 sep=""
444
445 if [ -n "$kernel_id" ]; then
446 sign_line="$sign_line${sep}\"kernel\""
447 sep=", "
448 fi
449
450 if [ -n "$dtb_image" ]; then
451 sign_line="$sign_line${sep}\"fdt\""
452 sep=", "
453 fi
454
455 if [ -n "$ramdisk_id" ]; then
456 sign_line="$sign_line${sep}\"ramdisk\""
457 sep=", "
458 fi
459
460 if [ -n "$bootscr_id" ]; then
461 sign_line="$sign_line${sep}\"bootscr\""
462 sep=", "
463 fi
464
465 if [ -n "$config_id" ]; then
466 sign_line="$sign_line${sep}\"setup\""
467 fi
468
469 sign_line="$sign_line;"
470
471 cat << EOF >> $its_file
472 signature-1 {
473 algo = "$conf_csum,$conf_sign_algo";
474 key-name-hint = "$conf_sign_keyname";
475 padding = "$conf_padding_algo";
476 $sign_line
477 };
478EOF
479 fi
480
481 cat << EOF >> $its_file
482 };
483EOF
484}
485
486#
487# Assemble fitImage
488#
489# $1 ... .its filename
490# $2 ... fitImage name
491# $3 ... include ramdisk
492fitimage_assemble() {
493 kernelcount=1
494 dtbcount=""
495 DTBS=""
496 ramdiskcount=$3
497 setupcount=""
498 bootscr_id=""
499 rm -f $1 arch/${ARCH}/boot/$2
500
501 if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
502 bbfatal "Keys used to sign images and configuration nodes must be different."
503 fi
504
505 fitimage_emit_fit_header $1
506
507 #
508 # Step 1: Prepare a kernel image section.
509 #
510 fitimage_emit_section_maint $1 imagestart
511
512 uboot_prep_kimage
513 fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
514
515 #
516 # Step 2: Prepare a DTB image section
517 #
518
519 if [ -n "${KERNEL_DEVICETREE}" ]; then
520 dtbcount=1
521 for DTB in ${KERNEL_DEVICETREE}; do
522 if echo $DTB | grep -q '/dts/'; then
523			bbwarn "$DTB contains the full path to the dts file, but only the dtb name should be used."
524 DTB=`basename $DTB | sed 's,\.dts$,.dtb,g'`
525 fi
526
527 # Skip ${DTB} if it's also provided in ${EXTERNAL_KERNEL_DEVICETREE}
528 if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -s ${EXTERNAL_KERNEL_DEVICETREE}/${DTB} ]; then
529 continue
530 fi
531
532 DTB_PATH="arch/${ARCH}/boot/dts/$DTB"
533 if [ ! -e "$DTB_PATH" ]; then
534 DTB_PATH="arch/${ARCH}/boot/$DTB"
535 fi
536
537 DTB=$(echo "$DTB" | tr '/' '_')
538 DTBS="$DTBS $DTB"
539 fitimage_emit_section_dtb $1 $DTB $DTB_PATH
540 done
541 fi
542
543 if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
544 dtbcount=1
545 for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
546 DTB=$(echo "$DTB" | tr '/' '_')
547 DTBS="$DTBS $DTB"
548 fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
549 done
550 fi
551
552 #
553 # Step 3: Prepare a u-boot script section
554 #
555
556 if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
557 if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
558 cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
559 bootscr_id="${UBOOT_ENV_BINARY}"
560 fitimage_emit_section_boot_script $1 "$bootscr_id" ${UBOOT_ENV_BINARY}
561 else
562 bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
563 fi
564 fi
565
566 #
567 # Step 4: Prepare a setup section. (For x86)
568 #
569 if [ -e arch/${ARCH}/boot/setup.bin ]; then
570 setupcount=1
571 fitimage_emit_section_setup $1 $setupcount arch/${ARCH}/boot/setup.bin
572 fi
573
574 #
575 # Step 5: Prepare a ramdisk section.
576 #
577 if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
578 # Find and use the first initramfs image archive type we find
579 found=
580 for img in ${FIT_SUPPORTED_INITRAMFS_FSTYPES}; do
581 initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img"
582 if [ -e "$initramfs_path" ]; then
583 bbnote "Found initramfs image: $initramfs_path"
584 found=true
585 fitimage_emit_section_ramdisk $1 "$ramdiskcount" "$initramfs_path"
586 break
587 else
588 bbnote "Did not find initramfs image: $initramfs_path"
589 fi
590 done
591
592 if [ -z "$found" ]; then
593 bbfatal "Could not find a valid initramfs type for ${INITRAMFS_IMAGE_NAME}, the supported types are: ${FIT_SUPPORTED_INITRAMFS_FSTYPES}"
594 fi
595 fi
596
597 fitimage_emit_section_maint $1 sectend
598
599 # Force the first Kernel and DTB in the default config
600 kernelcount=1
601 if [ -n "$dtbcount" ]; then
602 dtbcount=1
603 fi
604
605 #
606 # Step 6: Prepare a configurations section
607 #
608 fitimage_emit_section_maint $1 confstart
609
610	# kernel-fitimage.bbclass currently supports exactly one kernel image
611	# in the FIT image, along with zero or more device trees and
612	# zero or one ramdisk.
613	# It is also possible to include an initramfs bundle (kernel and rootfs in one binary).
614	# When the initramfs bundle is used, the ramdisk is disabled.
615	# If a device tree is to be part of the FIT image, the default
616	# configuration is selected based on the dtbcount. If there is
617	# no dtb present, the default configuration is selected based on
618	# the kernelcount.
619 if [ -n "$DTBS" ]; then
620 i=1
621 for DTB in ${DTBS}; do
622 dtb_ext=${DTB##*.}
623 if [ "$dtb_ext" = "dtbo" ]; then
624 fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`"
625 else
626 fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`"
627 fi
628 i=`expr $i + 1`
629 done
630 else
631 defaultconfigcount=1
632 fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount
633 fi
634
635 fitimage_emit_section_maint $1 sectend
636
637 fitimage_emit_section_maint $1 fitend
638
639 #
640 # Step 7: Assemble the image
641 #
642 ${UBOOT_MKIMAGE} \
643 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
644 -f $1 \
645 arch/${ARCH}/boot/$2
646
647 #
648 # Step 8: Sign the image and add public key to U-Boot dtb
649 #
650 if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
651 add_key_to_u_boot=""
652 if [ -n "${UBOOT_DTB_BINARY}" ]; then
653		# The u-boot.dtb is a symlink to UBOOT_DTB_IMAGE, so we need to copy
654		# both of them without dereferencing the symlink.
655 cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
656 add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
657 fi
658 ${UBOOT_MKIMAGE_SIGN} \
659 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
660 -F -k "${UBOOT_SIGN_KEYDIR}" \
661 $add_key_to_u_boot \
662 -r arch/${ARCH}/boot/$2 \
663 ${UBOOT_MKIMAGE_SIGN_ARGS}
664 fi
665}
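To make the emitted structure concrete, a minimal .its assembled by the helpers above might look roughly like this (one kernel, one dtb, no ramdisk; names, hash algorithm and paths are illustrative):

    /dts-v1/;
    / {
            description = "Kernel fitImage for poky/6.1/myboard";
            #address-cells = <1>;
            images {
                    kernel-1 { /* data = /incbin/("linux.bin"); ... */ };
                    fdt-myboard.dtb { /* data = /incbin/(".../myboard.dtb"); ... */ };
            };
            configurations {
                    default = "conf-myboard.dtb";
                    conf-myboard.dtb {
                            kernel = "kernel-1";
                            fdt = "fdt-myboard.dtb";
                            hash-1 { algo = "sha256"; };
                    };
            };
    };

Note the configuration node takes its name from the dtb when one is present, per fitimage_emit_section_config.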
666
667do_assemble_fitimage() {
668 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
669 cd ${B}
670 fitimage_assemble fit-image.its fitImage ""
671 fi
672}
673
674addtask assemble_fitimage before do_install after do_compile
675
676do_assemble_fitimage_initramfs() {
677 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
678 test -n "${INITRAMFS_IMAGE}" ; then
679 cd ${B}
680 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
681 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
682 else
683 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
684 fi
685 fi
686}
687
688addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
689
690do_kernel_generate_rsa_keys() {
691 if [ "${UBOOT_SIGN_ENABLE}" = "0" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
692 bbwarn "FIT_GENERATE_KEYS is set to 1 even though UBOOT_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
693 fi
694
695 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
696
697 # Generate keys to sign configuration nodes, only if they don't already exist
698 if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
699 [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then
700
701 # make directory if it does not already exist
702 mkdir -p "${UBOOT_SIGN_KEYDIR}"
703
704 bbnote "Generating RSA private key for signing fitImage"
705 openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
706 "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
707 "${FIT_SIGN_NUMBITS}"
708
709 bbnote "Generating certificate for signing fitImage"
710 openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
711 -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
712 -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
713 fi
714
715 # Generate keys to sign image nodes, only if they don't already exist
716 if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key ] || \
717 [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt ]; then
718
719 # make directory if it does not already exist
720 mkdir -p "${UBOOT_SIGN_KEYDIR}"
721
722 bbnote "Generating RSA private key for signing fitImage"
723 openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
724 "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
725 "${FIT_SIGN_NUMBITS}"
726
727 bbnote "Generating certificate for signing fitImage"
728 openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
729 -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
730 -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt
731 fi
732 fi
733}
734
735addtask kernel_generate_rsa_keys before do_assemble_fitimage after do_compile
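Assuming the usual defaults for FIT_KEY_GENRSA_ARGS, FIT_KEY_REQ_ARGS, FIT_KEY_SIGN_PKCS and FIT_SIGN_NUMBITS (they are defined in uboot-sign.bbclass, not here), the task above reduces to commands roughly equivalent to:

    openssl genrsa -out ${UBOOT_SIGN_KEYDIR}/dev.key 2048
    openssl req -batch -new -x509 -key ${UBOOT_SIGN_KEYDIR}/dev.key -out ${UBOOT_SIGN_KEYDIR}/dev.crt

with the key name and bit length shown as placeholders.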
736
737kernel_do_deploy[vardepsexclude] = "DATETIME"
738kernel_do_deploy:append() {
739 # Update deploy directory
740 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
741
742 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
743 bbnote "Copying fit-image.its source file..."
744 install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
745 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
746 ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
747 fi
748
749 bbnote "Copying linux.bin file..."
750 install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}
751 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
752 ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
753 fi
754 fi
755
756 if [ -n "${INITRAMFS_IMAGE}" ]; then
757 bbnote "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
758 install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
759 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
760 ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
761 fi
762
763 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
764 bbnote "Copying fitImage-${INITRAMFS_IMAGE} file..."
765 install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
766 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
767 ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
768 fi
769 fi
770 fi
771 fi
772 if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
773 [ -n "${UBOOT_DTB_BINARY}" ] ; then
774		# UBOOT_DTB_IMAGE is a real file, but we can't use
775		# ${UBOOT_DTB_IMAGE} since it contains ${PV}, which refers to
776		# u-boot's version, while we are in the kernel environment here.
777 install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
778 fi
779 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${UBOOT_BINARY}" -a -n "${SPL_DTB_BINARY}" ] ; then
780		# If we're also creating and/or signing the uboot fit, we now need to
781		# deploy it, its .its file, and u-boot-spl.dtb
782 install -m 0644 ${B}/u-boot-spl-${MACHINE}*.dtb "$deployDir/"
783 bbnote "Copying u-boot-fitImage file..."
784 install -m 0644 ${B}/u-boot-fitImage-* "$deployDir/"
785 bbnote "Copying u-boot-its file..."
786 install -m 0644 ${B}/u-boot-its-* "$deployDir/"
787 fi
788}
789
790# The function below performs the following in case of initramfs bundles:
791# - Removes do_assemble_fitimage. FIT generation is done through
792# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
793# and should not be part of the tasks to be executed.
794# - Since do_kernel_generate_rsa_keys is inserted by default
795# between do_compile and do_assemble_fitimage, this is
796# not suitable in case of initramfs bundles. do_kernel_generate_rsa_keys
797# should be between do_bundle_initramfs and do_assemble_fitimage_initramfs.
798python () {
799 if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
800 bb.build.deltask('do_assemble_fitimage', d)
801 bb.build.deltask('kernel_generate_rsa_keys', d)
802 bb.build.addtask('kernel_generate_rsa_keys', 'do_assemble_fitimage_initramfs', 'do_bundle_initramfs', d)
803}
diff --git a/meta/classes-recipe/kernel-grub.bbclass b/meta/classes-recipe/kernel-grub.bbclass
new file mode 100644
index 0000000000..2325e635e1
--- /dev/null
+++ b/meta/classes-recipe/kernel-grub.bbclass
@@ -0,0 +1,111 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# While installing an rpm to update the kernel on a deployed target, this class
9# updates the boot area and the boot menu so the new kernel boots by default,
10# while still allowing fallback to the original kernel.
11#
12# - In kernel-image's preinstall scriptlet, it backs up the original kernel to
13# avoid a possible conflict with the new one.
14#
15# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
16# make the new kernel the boot priority.
17#
18
19python __anonymous () {
20 import re
21
22 preinst = '''
23	# Resolve naming conflicts with the new kernel
24 [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
25 [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
26 if [ -n "$grubcfg" ]; then
27		# Dereference symlink to avoid a conflict with the new kernel name.
28 if grep -q "/KERNEL_IMAGETYPE \+root=" $grubcfg; then
29 if [ -L "$D/boot/KERNEL_IMAGETYPE" ]; then
30 kimage=`realpath $D/boot/KERNEL_IMAGETYPE 2>/dev/null`
31 if [ -f "$D$kimage" ]; then
32 sed -i "s:KERNEL_IMAGETYPE \+root=:${kimage##*/} root=:" $grubcfg
33 fi
34 fi
35 fi
36
37 # Rename old kernel if it conflicts with new kernel name.
38 if grep -q "/KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=" $grubcfg; then
39 if [ -f "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" ]; then
40 timestamp=`date +%s`
41 kimage="$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}-$timestamp-back"
42 sed -i "s:KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
43 mv "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" "$kimage"
44 fi
45 fi
46 fi
47'''
48
49 postinst = '''
50 get_new_grub_cfg() {
51 grubcfg="$1"
52 old_image="$2"
53 title="Update KERNEL_IMAGETYPE-${KERNEL_VERSION}-${PV}"
54 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
55 rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
56 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
57
58 echo "menuentry \"$title\" {"
59 echo " set root=(hd0,1)"
60 echo "$rootfs"
61 echo "}"
62 elif [ "${grubcfg##*/}" = "menu.list" ]; then
63 rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
64 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
65
66 echo "default 0"
67 echo "timeout 30"
68 echo "title $title"
69 echo "root (hd0,0)"
70 echo "$rootfs"
71 fi
72 }
73
74 get_old_grub_cfg() {
75 grubcfg="$1"
76 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
77 cat "$grubcfg"
78 elif [ "${grubcfg##*/}" = "menu.list" ]; then
79 sed -e '/^default/d' -e '/^timeout/d' "$grubcfg"
80 fi
81 }
82
83 if [ -f "$D/boot/grub/grub.cfg" ]; then
84 grubcfg="$D/boot/grub/grub.cfg"
85 old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
86 elif [ -f "$D/boot/grub/menu.list" ]; then
87 grubcfg="$D/boot/grub/menu.list"
88 old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
89 fi
90
91	# Don't update grubcfg on first install, while the old kernel image doesn't exist.
92 if [ -f "$D/boot/${old_image##*/}" ]; then
93 grubcfgtmp="$grubcfg.tmp"
94 get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
95 get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
96 mv $grubcfgtmp $grubcfg
97		echo "Caution! Updating the kernel may affect kernel modules!"
98 fi
99'''
100
101 imagetypes = d.getVar('KERNEL_IMAGETYPES')
102 imagetypes = re.sub(r'\.gz$', '', imagetypes)
103
104 for type in imagetypes.split():
105 typelower = type.lower()
106 preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
107 postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
108 d.setVar('pkg_preinst:kernel-image-' + typelower + ':append', preinst_append)
109 d.setVar('pkg_postinst:kernel-image-' + typelower + ':prepend', postinst_prepend)
110}
111
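For orientation, the postinst above would prepend a menu entry of roughly this shape to grub.cfg (kernel file name, version and root device are illustrative):

    menuentry "Update bzImage-5.15.80-1.0" {
        set root=(hd0,1)
        linux /boot/bzImage-5.15.80 root=/dev/sda2
    }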
diff --git a/meta/classes-recipe/kernel-module-split.bbclass b/meta/classes-recipe/kernel-module-split.bbclass
new file mode 100644
index 0000000000..1b4c864a63
--- /dev/null
+++ b/meta/classes-recipe/kernel-module-split.bbclass
@@ -0,0 +1,197 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7pkg_postinst:modules () {
8if [ -z "$D" ]; then
9 depmod -a ${KERNEL_VERSION}
10else
11 # image.bbclass will call depmodwrapper after everything is installed,
12 # no need to do it here as well
13 :
14fi
15}
16
17pkg_postrm:modules () {
18if [ -z "$D" ]; then
19 depmod -a ${KERNEL_VERSION}
20else
21 depmodwrapper -a -b $D ${KERNEL_VERSION}
22fi
23}
24
25autoload_postinst_fragment() {
26if [ x"$D" = "x" ]; then
27 modprobe %s || true
28fi
29}
30
31PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
32
33do_install:append() {
34 install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
35}
36
37KERNEL_SPLIT_MODULES ?= "1"
38PACKAGESPLITFUNCS:prepend = "split_kernel_module_packages "
39
40KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
41
42KERNEL_MODULE_PACKAGE_PREFIX ?= ""
43KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
44KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1"
45
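With these defaults, a module foo.ko built against a 6.1.0 kernel ends up in a package such as kernel-module-foo-6.1.0, which also RPROVIDES kernel-module-foo. A hypothetical distro override:

    KERNEL_MODULE_PACKAGE_PREFIX = "my-"
    KERNEL_MODULE_PACKAGE_SUFFIX = ""
    # module packages would then be named my-kernel-module-<name>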
46python split_kernel_module_packages () {
47 import re
48
49 modinfoexp = re.compile("([^=]+)=(.*)")
50
51 def extract_modinfo(file):
52 import tempfile, subprocess
53 tempfile.tempdir = d.getVar("WORKDIR")
54 compressed = re.match( r'.*\.(gz|xz|zst)$', file)
55 tf = tempfile.mkstemp()
56 tmpfile = tf[1]
57 if compressed:
58 tmpkofile = tmpfile + ".ko"
59 if compressed.group(1) == 'gz':
60 cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
61 subprocess.check_call(cmd, shell=True)
62 elif compressed.group(1) == 'xz':
63 cmd = "xz -dc %s > %s" % (file, tmpkofile)
64 subprocess.check_call(cmd, shell=True)
65 elif compressed.group(1) == 'zst':
66 cmd = "zstd -dc %s > %s" % (file, tmpkofile)
67 subprocess.check_call(cmd, shell=True)
68 else:
69 msg = "Cannot decompress '%s'" % file
70				raise ValueError(msg)
71 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
72 else:
73 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
74 subprocess.check_call(cmd, shell=True)
75 # errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
76 f = open(tmpfile, errors='replace')
77 l = f.read().split("\000")
78 f.close()
79 os.close(tf[0])
80 os.unlink(tmpfile)
81 if compressed:
82 os.unlink(tmpkofile)
83 vals = {}
84 for i in l:
85 m = modinfoexp.match(i)
86 if not m:
87 continue
88 vals[m.group(1)] = m.group(2)
89 return vals
90
91 def frob_metadata(file, pkg, pattern, format, basename):
92 vals = extract_modinfo(file)
93
94 dvar = d.getVar('PKGD')
95
96 # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
97 # appropriate modprobe commands to the postinst
98 autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
99 autoload = d.getVar('module_autoload_%s' % basename)
100 if autoload and autoload == basename:
101 bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
102 if autoload and basename not in autoloadlist:
103 bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
104 if basename in autoloadlist:
105 name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
106 f = open(name, 'w')
107 if autoload:
108 for m in autoload.split():
109 f.write('%s\n' % m)
110 else:
111 f.write('%s\n' % basename)
112 f.close()
113 postinst = d.getVar('pkg_postinst:%s' % pkg)
114 if not postinst:
115 bb.fatal("pkg_postinst:%s not defined" % pkg)
116 postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
117 d.setVar('pkg_postinst:%s' % pkg, postinst)
118
119 # Write out any modconf fragment
120 modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
121 modconf = d.getVar('module_conf_%s' % basename)
122 if modconf and basename in modconflist:
123 name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
124 f = open(name, 'w')
125 f.write("%s\n" % modconf)
126 f.close()
127 elif modconf:
128 bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
129
130 files = d.getVar('FILES:%s' % pkg)
131 files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
132 d.setVar('FILES:%s' % pkg, files)
133
134 conffiles = d.getVar('CONFFILES:%s' % pkg)
135 conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
136 d.setVar('CONFFILES:%s' % pkg, conffiles)
137
138 if "description" in vals:
139 old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
140 d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"])
141
142 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
143 modinfo_deps = []
144 if "depends" in vals and vals["depends"] != "":
145 for dep in vals["depends"].split(","):
146 on = legitimize_package_name(dep)
147 dependency_pkg = format % on
148 modinfo_deps.append(dependency_pkg)
149 for dep in modinfo_deps:
150 if not dep in rdepends:
151 rdepends[dep] = []
152 d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
153
154 # Avoid automatic -dev recommendations for modules ending with -dev.
155 d.setVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs', 1)
156
157 # Provide virtual package without postfix
158 providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
159 if providevirt == "1":
160 postfix = format.split('%s')[1]
161 d.setVar('RPROVIDES:' + pkg, pkg.replace(postfix, ''))
162
163 kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
164 kernel_version = d.getVar("KERNEL_VERSION")
165
166 metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
167 splitmods = d.getVar('KERNEL_SPLIT_MODULES')
168 postinst = d.getVar('pkg_postinst:modules')
169 postrm = d.getVar('pkg_postrm:modules')
170
171 if splitmods != '1':
172 etcdir = d.getVar('sysconfdir')
173 d.appendVar('FILES:' + metapkg, '%s/modules-load.d/ %s/modprobe.d/ %s/modules/' % (etcdir, etcdir, d.getVar("nonarch_base_libdir")))
174 d.appendVar('pkg_postinst:%s' % metapkg, postinst)
175		d.prependVar('pkg_postrm:%s' % metapkg, postrm)
176 return
177
178 module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$'
179
180 module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
181 module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
182 module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
183
184 modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
185 if modules:
186 d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
187
188	# If modules-load.d and modprobe.d are empty at this point, remove them to
189	# avoid warnings. os.rmdir raises an OSError on a non-empty directory,
190	# so only directories verified empty are removed here.
191 dvar = d.getVar('PKGD')
192 for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
193 if len(os.listdir(dir)) == 0:
194 os.rmdir(dir)
195}
196
197do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
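As a usage sketch (module names are hypothetical), frob_metadata above reacts to recipe or config settings such as:

    KERNEL_MODULE_AUTOLOAD += "8021q"
    KERNEL_MODULE_PROBECONF += "bonding"
    module_conf_bonding = "options bonding mode=1 miimon=100"

producing /etc/modules-load.d/8021q.conf and /etc/modprobe.d/bonding.conf in the matching module packages, plus a modprobe call in the postinst.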
diff --git a/meta/classes-recipe/kernel-uboot.bbclass b/meta/classes-recipe/kernel-uboot.bbclass
new file mode 100644
index 0000000000..4aab02671e
--- /dev/null
+++ b/meta/classes-recipe/kernel-uboot.bbclass
@@ -0,0 +1,49 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# fitImage kernel compression algorithm
8FIT_KERNEL_COMP_ALG ?= "gzip"
9FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
10
11# Kernel image type passed to mkimage (i.e. kernel kernel_noload...)
12UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel"
13
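If a different fitImage kernel compression is wanted, both variables must be changed together — a hedged example:

    FIT_KERNEL_COMP_ALG = "lzo"
    FIT_KERNEL_COMP_ALG_EXTENSION = ".lzo"

Note that uboot_prep_kimage below only implements gzip and lzo compression.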
14uboot_prep_kimage() {
15 if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
16 vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
17 linux_suffix=""
18 linux_comp="none"
19 elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
20 rm -f linux.bin
21 cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
22 vmlinux_path=""
23 linux_suffix=""
24 linux_comp="none"
25 else
26 vmlinux_path="vmlinux"
27 # Use vmlinux.initramfs for linux.bin when INITRAMFS_IMAGE_BUNDLE set
28 # As per the implementation in kernel.bbclass.
29 # See do_bundle_initramfs function
30 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ] && [ -e vmlinux.initramfs ]; then
31 vmlinux_path="vmlinux.initramfs"
32 fi
33 linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}"
34 linux_comp="${FIT_KERNEL_COMP_ALG}"
35 fi
36
37 [ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
38
39 if [ "${linux_comp}" != "none" ] ; then
40 if [ "${linux_comp}" = "gzip" ] ; then
41 gzip -9 linux.bin
42 elif [ "${linux_comp}" = "lzo" ] ; then
43 lzop -9 linux.bin
44 fi
45 mv -f "linux.bin${linux_suffix}" linux.bin
46 fi
47
48 echo "${linux_comp}"
49}
diff --git a/meta/classes-recipe/kernel-uimage.bbclass b/meta/classes-recipe/kernel-uimage.bbclass
new file mode 100644
index 0000000000..1a599e656c
--- /dev/null
+++ b/meta/classes-recipe/kernel-uimage.bbclass
@@ -0,0 +1,41 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-uboot
8
9python __anonymous () {
10 if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
11 depends = d.getVar("DEPENDS")
12 depends = "%s u-boot-tools-native" % depends
13 d.setVar("DEPENDS", depends)
14
15	# Override the KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
16	# to kernel.bbclass. We override the variable here, since we need
17	# to build uImage using the kernel build system if and only if
18	# KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
19	# the uImage.
20 if d.getVar("KEEPUIMAGE") != 'yes':
21 typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
22 if "uImage" in typeformake.split():
23 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))
24
25 # Enable building of uImage with mkimage
26 bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d)
27}
28
29do_uboot_mkimage[dirs] += "${B}"
30do_uboot_mkimage() {
31 uboot_prep_kimage
32
33 ENTRYPOINT=${UBOOT_ENTRYPOINT}
34 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
35 ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \
36 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
37 fi
38
39 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
40 rm -f linux.bin
41}
diff --git a/meta/classes-recipe/kernel-yocto.bbclass b/meta/classes-recipe/kernel-yocto.bbclass
new file mode 100644
index 0000000000..8eda0dcaf3
--- /dev/null
+++ b/meta/classes-recipe/kernel-yocto.bbclass
@@ -0,0 +1,732 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# remove tasks that modify the source tree in case externalsrc is inherited
8SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
9PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
10PATCH_GIT_USER_NAME ?= "OpenEmbedded"
11
12# The distro or local.conf should set this, but if nobody cares...
13LINUX_KERNEL_TYPE ??= "standard"
14
15# KMETA ?= ""
16KBRANCH ?= "master"
17KMACHINE ?= "${MACHINE}"
18SRCREV_FORMAT ?= "meta_machine"
19
20# LEVELS:
21# 0: no reporting
22# 1: report options that are specified, but not in the final config
23# 2: report options that are not hardware related, but set by a BSP
24KCONF_AUDIT_LEVEL ?= "1"
25KCONF_BSP_AUDIT_LEVEL ?= "0"
26KMETA_AUDIT ?= "yes"
27KMETA_AUDIT_WERROR ?= ""
28
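A hedged local.conf fragment for stricter configuration audits during BSP development (the audit code consuming these variables lives later in this class):

    KCONF_AUDIT_LEVEL = "2"
    KCONF_BSP_AUDIT_LEVEL = "2"
    KMETA_AUDIT_WERROR = "1"   # any non-empty value is assumed to promote audit warnings to errors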
29# returns local (absolute) path names for all valid patches in the
30# src_uri
31def find_patches(d,subdir):
32 patches = src_patches(d)
33 patch_list=[]
34 for p in patches:
35 _, _, local, _, _, parm = bb.fetch.decodeurl(p)
36 # if patchdir has been passed, we won't be able to apply it so skip
37 # the patch for now, and special processing happens later
38 patchdir = ''
39 if "patchdir" in parm:
40 patchdir = parm["patchdir"]
41 if subdir:
42 if subdir == patchdir:
43 patch_list.append(local)
44 else:
45 # skip the patch if a patchdir was supplied, it won't be handled
46 # properly
47 if not patchdir:
48 patch_list.append(local)
49
50 return patch_list
51
52# returns all the elements from the src uri that are .scc files
53def find_sccs(d):
54 sources=src_patches(d, True)
55 sources_list=[]
56 for s in sources:
57 base, ext = os.path.splitext(os.path.basename(s))
58 if ext and ext in [".scc", ".cfg"]:
59 sources_list.append(s)
60 elif base and 'defconfig' in base:
61 sources_list.append(s)
62
63 return sources_list
64
65# check the SRC_URI for "kmeta" type'd git repositories. Return the name of
66# the repository as it will be found in WORKDIR
67def find_kernel_feature_dirs(d):
68 feature_dirs=[]
69 fetch = bb.fetch2.Fetch([], d)
70 for url in fetch.urls:
71 urldata = fetch.ud[url]
72 parm = urldata.parm
73 type=""
74 if "type" in parm:
75 type = parm["type"]
76 if "destsuffix" in parm:
77 destdir = parm["destsuffix"]
78 if type == "kmeta":
79 feature_dirs.append(destdir)
80
81 return feature_dirs
82
83# find the master/machine source branch. In the same way that the fetcher processes
84# git repositories in the SRC_URI we take the first repo found, first branch.
85def get_machine_branch(d, default):
86 fetch = bb.fetch2.Fetch([], d)
87 for url in fetch.urls:
88 urldata = fetch.ud[url]
89 parm = urldata.parm
90 if "branch" in parm:
91 branches = urldata.parm.get("branch").split(',')
92 btype = urldata.parm.get("type")
93 if btype != "kmeta":
94 return branches[0]
95
96 return default
97
98# returns a list of all directories that are on FILESEXTRAPATHS (and
99# hence available to the build) that contain .scc or .cfg files
100def get_dirs_with_fragments(d):
101 extrapaths = []
102 extrafiles = []
103 extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "")
104 # Remove default flag which was used for checking
105 extrapathsvalue = extrapathsvalue.replace("__default:", "")
106 extrapaths = extrapathsvalue.split(":")
107 for path in extrapaths:
108 if path + ":True" not in extrafiles:
109 extrafiles.append(path + ":" + str(os.path.exists(path)))
110
111 return " ".join(extrafiles)
112
113do_kernel_metadata() {
114 set +e
115
116 if [ -n "$1" ]; then
117 mode="$1"
118 else
119 mode="patch"
120 fi
121
122 cd ${S}
123 export KMETA=${KMETA}
124
125 bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0"
126
127	# if kernel tools are available in-tree, they are preferred
128	# and are placed on the path before any external tools, unless
129	# the external tools flag is set, in which case we do nothing.
130 if [ -f "${S}/scripts/util/configme" ]; then
131 if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
132 PATH=${S}/scripts/util:${PATH}
133 fi
134 fi
135
136 # In a similar manner to the kernel itself:
137 #
138 # defconfig: $(obj)/conf
139 # ifeq ($(KBUILD_DEFCONFIG),)
140 # $< --defconfig $(Kconfig)
141 # else
142 # @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
143 # $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
144 # endif
145 #
146	# If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it
147	# from the source tree into a common location under the normalized name
148	# "defconfig", where the rest of the process will include and incorporate it
149	# into the build.
150	#
151	# If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
152	# it is overwritten: the in-tree KBUILD_DEFCONFIG copy takes precedence.
153 #
154 if [ -n "${KBUILD_DEFCONFIG}" ]; then
155 if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
156 if [ -f "${WORKDIR}/defconfig" ]; then
157				# If the two defconfigs are different, note that we overwrote the
158				# one already placed in WORKDIR
159 cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
160 if [ $? -ne 0 ]; then
161 bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
162 fi
163 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
164 else
165 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
166 fi
167 in_tree_defconfig="${WORKDIR}/defconfig"
168 else
169 bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
170 fi
171 fi
172
173 if [ "$mode" = "patch" ]; then
174		# apply any patches to the kernel meta data here, since the scc
175		# commands migrate the .cfg fragments to the kernel source tree,
176		# where they'll be used later.
177 check_git_config
178 patches="${@" ".join(find_patches(d,'kernel-meta'))}"
179 for p in $patches; do
180 (
181 cd ${WORKDIR}/kernel-meta
182 git am -s $p
183 )
184 done
185 fi
186
187 sccs_from_src_uri="${@" ".join(find_sccs(d))}"
188 patches="${@" ".join(find_patches(d,''))}"
189 feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
190
191	# a quick check to make sure we don't have duplicate defconfigs. If
192	# there's a defconfig in the SRC_URI, did we also have one from the
193	# KBUILD_DEFCONFIG processing above?
194 src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ')
195	# drop any defconfigs from the src_uri variable; we captured one just above if it existed
196 sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ')
197
198 if [ -n "$in_tree_defconfig" ]; then
199 sccs_defconfig=$in_tree_defconfig
200 if [ -n "$src_uri_defconfig" ]; then
201 bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI entry $src_uri_defconfig"
202 fi
203 else
204 # if we didn't have an in-tree one, make our defconfig the one
205 # from the src_uri. Note: there may not have been one from the
206 # src_uri, so this can be an empty variable.
207 sccs_defconfig=$src_uri_defconfig
208 fi
209 sccs="$sccs_from_src_uri"
210
211 # check for feature directories/repos/branches that were part of the
212 # SRC_URI. If they were supplied, we convert them into include directives
213 # for the update part of the process
214 for f in ${feat_dirs}; do
215 if [ -d "${WORKDIR}/$f/meta" ]; then
216 includes="$includes -I${WORKDIR}/$f/kernel-meta"
217 elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
218 includes="$includes -I${WORKDIR}/../oe-local-files/$f"
219 elif [ -d "${WORKDIR}/$f" ]; then
220 includes="$includes -I${WORKDIR}/$f"
221 fi
222 done
223 for s in ${sccs} ${patches}; do
224 sdir=$(dirname $s)
225 includes="$includes -I${sdir}"
226 # if a SRC_URI passed patch or .scc has a subdir of "kernel-meta",
227 # then we add it to the search path
228 if [ -d "${sdir}/kernel-meta" ]; then
229 includes="$includes -I${sdir}/kernel-meta"
230 fi
231 done
232
233 # expand kernel features into their full path equivalents
234 bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
235 if [ -z "$bsp_definition" ]; then
236 if [ -z "$sccs_defconfig" ]; then
237 bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
238 fi
239 else
240 # if the bsp definition has "define KMETA_EXTERNAL_BSP t",
241 # then we need to set a flag that will instruct the next
242 # steps to use the BSP as both configuration and patches.
243 grep -q KMETA_EXTERNAL_BSP $bsp_definition
244 if [ $? -eq 0 ]; then
245 KMETA_EXTERNAL_BSPS="t"
246 fi
247 fi
248 meta_dir=$(kgit --meta)
249
250 KERNEL_FEATURES_FINAL=""
251 if [ -n "${KERNEL_FEATURES}" ]; then
252 for feature in ${KERNEL_FEATURES}; do
253 feature_found=f
254 for d in $includes; do
255 path_to_check=$(echo $d | sed 's/^-I//')
256 if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then
257 feature_found=t
258 fi
259 done
260 if [ "$feature_found" = "f" ]; then
261 if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then
262 bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set"
263 bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue"
264 else
265 bberror "Feature '$feature' not found, this will cause configuration failures."
266 bberror "Check the SRC_URI for meta-data repositories or directories that may be missing"
267 bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
268 fi
269 else
270 KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature"
271 fi
272 done
273 fi
274
275 if [ "$mode" = "config" ]; then
276 # run1: pull all the configuration fragments, no matter where they come from
277 elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
278 if [ -n "${elements}" ]; then
279 echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
280 scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
281 if [ $? -ne 0 ]; then
282 bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
283 fi
284 fi
285 fi
286
287 # if KMETA_EXTERNAL_BSPS has been set, or it has been detected from
288 # the bsp definition, then we inject the bsp_definition into the
289	# patch phase below. We'll piggyback on the sccs variable.
290 if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
291 sccs="${bsp_definition} ${sccs}"
292 fi
293
294 if [ "$mode" = "patch" ]; then
295 # run2: only generate patches for elements that have been passed on the SRC_URI
296 elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
297 if [ -n "${elements}" ]; then
298 scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL
299 if [ $? -ne 0 ]; then
300 bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
301 fi
302 fi
303 fi
304
305 if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then
306 bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):"
307 bbnote "======================================================================"
308 if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
309 bbnote "Non kernel-cache (external) bsp"
310 fi
311 bbnote "BSP entry point / definition: $bsp_definition"
312 if [ -n "$in_tree_defconfig" ]; then
313 bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}"
314 fi
315 bbnote "Fragments from SRC_URI: $sccs_from_src_uri"
316 bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
317 bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
318 fi
319
320 set -e
321}
322
323do_patch() {
324 set +e
325 cd ${S}
326
327 check_git_config
328 meta_dir=$(kgit --meta)
329 (cd ${meta_dir}; ln -sf patch.queue series)
330 if [ -f "${meta_dir}/series" ]; then
331 kgit_extra_args=""
332 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
333 kgit_extra_args="--commit-sha author"
334 fi
335 kgit-s2q --gen -v $kgit_extra_args --patches .kernel-meta/
336 if [ $? -ne 0 ]; then
337 bberror "Could not apply patches for ${KMACHINE}."
338 bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
339 fi
340 fi
341
342 if [ -f "${meta_dir}/merge.queue" ]; then
343 # we need to merge all these branches
344 for b in $(cat ${meta_dir}/merge.queue); do
345 git show-ref --verify --quiet refs/heads/${b}
346 if [ $? -eq 0 ]; then
347 bbnote "Merging branch ${b}"
348 git merge -q --no-ff -m "Merge branch ${b}" ${b}
349 else
350 bbfatal "branch ${b} does not exist, cannot merge"
351 fi
352 done
353 fi
354
355 set -e
356}
357
358do_kernel_checkout() {
359 set +e
360
361 source_dir=`echo ${S} | sed 's%/$%%'`
362 source_workdir="${WORKDIR}/git"
363 if [ -d "${WORKDIR}/git/" ]; then
364 # case: git repository
365 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
366 if [ "${source_dir}" != "${source_workdir}" ]; then
367 if [ -d "${source_workdir}/.git" ]; then
368 # regular git repository with .git
369 rm -rf ${S}
370 mv ${WORKDIR}/git ${S}
371 else
372 # create source for bare cloned git repository
373 git clone ${WORKDIR}/git ${S}
374 rm -rf ${WORKDIR}/git
375 fi
376 fi
377 cd ${S}
378
379 # convert any remote branches to local tracking ones
380 for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
381 b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
382 git show-ref --quiet --verify -- "refs/heads/$b"
383 if [ $? -ne 0 ]; then
384 git branch $b $i > /dev/null
385 fi
386 done
387
388 # Create a working tree copy of the kernel by checking out a branch
389 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
390
391 # checkout and clobber any unimportant files
392 git checkout -f ${machine_branch}
393 else
394 # case: we have no git repository at all.
395 # To support low bandwidth options for building the kernel, we'll just
396 # convert the tree to a git repo and let the rest of the process work unchanged
397
398	# if ${S} hasn't been set to the proper subdirectory, a default of "linux" is
399 # used, but we can't initialize that empty directory. So check it and throw a
400 # clear error
401
402 cd ${S}
403 if [ ! -f "Makefile" ]; then
404 bberror "S is not set to the linux source directory. Check "
405 bbfatal "the recipe and set S to the proper extracted subdirectory"
406 fi
407 rm -f .gitignore
408 git init
409 check_git_config
410 git add .
411 git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
412 git clean -d -f
413 fi
414
415 set -e
416}
417do_kernel_checkout[dirs] = "${S} ${WORKDIR}"
418
419addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
420addtask kernel_metadata after do_validate_branches do_unpack before do_patch
421do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
422do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
423do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
424
425do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
426do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
427do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
428do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
429do_kernel_configme[dirs] += "${S} ${B}"
430do_kernel_configme() {
431 do_kernel_metadata config
432
433 # translate the kconfig_mode into something that merge_config.sh
434 # understands
435 case ${KCONFIG_MODE} in
436 *allnoconfig)
437 config_flags="-n"
438 ;;
439 *alldefconfig)
440 config_flags=""
441 ;;
442 *)
443 if [ -f ${WORKDIR}/defconfig ]; then
444 config_flags="-n"
445 fi
446 ;;
447 esac
448
449 cd ${S}
450
451 meta_dir=$(kgit --meta)
452 configs="$(scc --configs -o ${meta_dir})"
453 if [ $? -ne 0 ]; then
454 bberror "${configs}"
455 bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
456 fi
457
458 CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
459 if [ $? -ne 0 -o ! -f ${B}/.config ]; then
460 bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
461 if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
462 bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`"
463 else
464 bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log"
465 fi
466 fi
467
468 if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then
469 echo "# Global settings from linux recipe" >> ${B}/.config
470 echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
471 fi
472}
473
474addtask kernel_configme before do_configure after do_patch
475addtask config_analysis
476
477do_config_analysis[depends] = "virtual/kernel:do_configure"
478do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot"
479
480CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt"
481CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt"
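# Hedged usage sketch: the files above are written by the do_config_analysis
# task added below, which can be invoked directly, e.g.:
#   bitbake virtual/kernel -c config_analysis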
482
483python do_config_analysis() {
484 import re, string, sys, subprocess
485
486 s = d.getVar('S')
487
488 env = os.environ.copy()
489 env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
490 env['LD'] = d.getVar('KERNEL_LD')
491 env['CC'] = d.getVar('KERNEL_CC')
492 env['ARCH'] = d.getVar('ARCH')
493 env['srctree'] = s
494
495 # read specific symbols from the kernel recipe or from local.conf
496 # i.e.: CONFIG_ANALYSIS:pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
497 config = d.getVar( 'CONFIG_ANALYSIS' )
498 if not config:
499 config = [ "" ]
500 else:
501 config = config.split()
502
503 for c in config:
504 for action in ["analysis","audit"]:
505 if action == "analysis":
506 try:
507 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
508 except subprocess.CalledProcessError as e:
509 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
510
511 outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
512
513 if action == "audit":
514 try:
515 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
516 except subprocess.CalledProcessError as e:
517 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
518
519 outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
520
521 if c:
522 outdir = os.path.dirname( outfile )
523 outname = os.path.basename( outfile )
524 outfile = outdir + '/'+ c + '-' + outname
525
526 if config and os.path.isfile(outfile):
527 os.remove(outfile)
528
529 with open(outfile, 'w+') as f:
530 f.write( analysis )
531
532 bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile ))
533 if c:
534 bb.warn( analysis )
535}
536
537python do_kernel_configcheck() {
538 import re, string, sys, subprocess
539
540 s = d.getVar('S')
541
542 # if KMETA isn't set globally by a recipe using this routine, use kgit to
543 # locate or create the meta directory. Otherwise, kconf_check is not
544 # passed a valid meta-series for processing
545 kmeta = d.getVar("KMETA")
546 if not kmeta or not os.path.exists('{}/{}'.format(s,kmeta)):
547 kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip()
548
549 env = os.environ.copy()
550 env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
551 env['LD'] = d.getVar('KERNEL_LD')
552 env['CC'] = d.getVar('KERNEL_CC')
553 env['ARCH'] = d.getVar('ARCH')
554 env['srctree'] = s
555
556 try:
557 configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
558 except subprocess.CalledProcessError as e:
559 bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
560
561 config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
562 bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
563 kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or ""
564 warnings_detected = False
565
566 # if config check visibility is "1", that's the lowest level of audit. So
567 # we add the --classify option to the run, since classification will
568 # streamline the output to only report options that could be boot issues,
569 # or are otherwise required for proper operation.
570 extra_params = ""
571 if config_check_visibility == 1:
572 extra_params = "--classify"
573
574 # category #1: mismatches
575 try:
576 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
577 except subprocess.CalledProcessError as e:
578 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
579
580 if analysis:
581 outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
582 if os.path.isfile(outfile):
583 os.remove(outfile)
584 with open(outfile, 'w+') as f:
585 f.write( analysis )
586
587 if config_check_visibility and os.stat(outfile).st_size > 0:
588 with open (outfile, "r") as myfile:
589 results = myfile.read()
590 bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
591 warnings_detected = True
592
593 # category #2: invalid fragment elements
594 extra_params = ""
595 if bsp_check_visibility > 1:
596 extra_params = "--strict"
597 try:
598 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
599 except subprocess.CalledProcessError as e:
600 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
601
602 if analysis:
603 outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
604 if os.path.isfile(outfile):
605 os.remove(outfile)
606 with open(outfile, 'w+') as f:
607 f.write( analysis )
608
609 if bsp_check_visibility and os.stat(outfile).st_size > 0:
610 with open (outfile, "r") as myfile:
611 results = myfile.read()
612 bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results)
613 warnings_detected = True
614
615 # category #3: redefined options (this is pretty verbose and is debug only)
616 try:
617 analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
618 except subprocess.CalledProcessError as e:
619 bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
620
621 if analysis:
622 outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
623 if os.path.isfile(outfile):
624 os.remove(outfile)
625 with open(outfile, 'w+') as f:
626 f.write( analysis )
627
628	# if the audit level is greater than two, we report if a fragment has overridden
629 # a value from a base fragment. This is really only used for new kernel introduction
630 if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0:
631 with open (outfile, "r") as myfile:
632 results = myfile.read()
633 bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
634 warnings_detected = True
635
636 if warnings_detected and kmeta_audit_werror:
637 bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" )
638}
639
640# Ensure that the branches (BSP and meta) are at the locations specified by
641# their SRCREV values. If they are NOT on the right commits, the branches
642# are corrected to the proper commit.
643do_validate_branches() {
644 set +e
645 cd ${S}
646
647 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
648 machine_srcrev="${SRCREV_machine}"
649
650	# if SRCREV is AUTOREV it shows up as AUTOINC, so there's nothing to
651 # check and we can exit early
652 if [ "${machine_srcrev}" = "AUTOINC" ]; then
653 linux_yocto_dev='${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "1", "", d)}'
654 if [ -n "$linux_yocto_dev" ]; then
655 git checkout -q -f ${machine_branch}
656 ver=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
657 patchlevel=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
658 sublevel=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
659 kver="$ver.$patchlevel"
660 bbnote "dev kernel: performing version -> branch -> SRCREV validation"
661 bbnote "dev kernel: recipe version ${LINUX_VERSION}, src version: $kver"
662 echo "${LINUX_VERSION}" | grep -q $kver
663 if [ $? -ne 0 ]; then
664 version="$(echo ${LINUX_VERSION} | sed 's/\+.*$//g')"
665 versioned_branch="v$version/$machine_branch"
666
667 machine_branch=$versioned_branch
668 force_srcrev="$(git rev-parse $machine_branch 2> /dev/null)"
669 if [ $? -ne 0 ]; then
670 bbfatal "kernel version mismatch detected, and no valid branch $machine_branch detected"
671 fi
672
673 bbnote "dev kernel: adjusting branch to $machine_branch, srcrev to: $force_srcrev"
674 fi
675 else
676 bbnote "SRCREV validation is not required for AUTOREV"
677 fi
678 elif [ "${machine_srcrev}" = "" ]; then
679 if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
680 # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
681 # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
682	# this case, we need to reset to the given SRCREV before heading to patching
683 bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
684 force_srcrev="${SRCREV}"
685 fi
686 else
687 git cat-file -t ${machine_srcrev} > /dev/null
688 if [ $? -ne 0 ]; then
689 bberror "${machine_srcrev} is not a valid commit ID."
690 bbfatal_log "The kernel source tree may be out of sync"
691 fi
692 force_srcrev=${machine_srcrev}
693 fi
694
695 git checkout -q -f ${machine_branch}
696 if [ -n "${force_srcrev}" ]; then
697 # see if the branch we are about to patch has been properly reset to the defined
698 # SRCREV .. if not, we reset it.
699 branch_head=`git rev-parse HEAD`
700 if [ "${force_srcrev}" != "${branch_head}" ]; then
701 current_branch=`git rev-parse --abbrev-ref HEAD`
702 git branch "$current_branch-orig"
703 git reset --hard ${force_srcrev}
704	# We've checked out HEAD, so make sure we clean up the kgit-s2q fence-post check
705	# so that the patches are applied as expected; otherwise no patching
706 # would be done in some corner cases.
707 kgit-s2q --clean
708 fi
709 fi
710
711 set -e
712}
713
714OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
715KBUILD_OUTPUT = "${B}"
716
717python () {
718 # If diffconfig is available, ensure it runs after kernel_configme
719 if 'do_diffconfig' in d:
720 bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
721
722 externalsrc = d.getVar('EXTERNALSRC')
723 if externalsrc:
724 # If we deltask do_patch, do_kernel_configme is left without
725 # dependencies and runs too early
726 d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack'])
727}
728
729# extra tasks
730addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile
731addtask validate_branches before do_patch after do_kernel_checkout
732addtask kernel_configcheck after do_configure before do_compile
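# Illustrative local.conf settings for the audit machinery above (values are
# examples, not defaults):
#   KCONF_AUDIT_LEVEL = "2"
#   KMETA_AUDIT_WERROR = "1"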
diff --git a/meta/classes-recipe/kernel.bbclass b/meta/classes-recipe/kernel.bbclass
new file mode 100644
index 0000000000..3463179395
--- /dev/null
+++ b/meta/classes-recipe/kernel.bbclass
@@ -0,0 +1,821 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit linux-kernel-base kernel-module-split
8
9COMPATIBLE_HOST = ".*-linux"
10
11KERNEL_PACKAGE_NAME ??= "kernel"
12KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
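# Sketch for an alternate kernel recipe (the name is hypothetical): setting
#   KERNEL_PACKAGE_NAME = "kernel-custom"
# renames the output packages and deploys artifacts under a "kernel-custom"
# subdirectory, per KERNEL_DEPLOYSUBDIR above.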
13
14PROVIDES += "virtual/kernel"
15DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
16DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
17DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
18DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}"
19PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
20
21do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
22do_clean[depends] += "make-mod-scripts:do_clean"
23
24CVE_PRODUCT ?= "linux_kernel"
25
26S = "${STAGING_KERNEL_DIR}"
27B = "${WORKDIR}/build"
28KBUILD_OUTPUT = "${B}"
29OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
30
31# we include gcc above, so we don't need virtual/libc
32INHIBIT_DEFAULT_DEPS = "1"
33
34KERNEL_IMAGETYPE ?= "zImage"
35INITRAMFS_IMAGE ?= ""
36INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
37INITRAMFS_TASK ?= ""
38INITRAMFS_IMAGE_BUNDLE ?= ""
39INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}"
40INITRAMFS_MULTICONFIG ?= ""
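# Minimal bundling sketch for a recipe or local.conf (the image name is an
# example from oe-core; substitute your own initramfs recipe):
#   INITRAMFS_IMAGE = "core-image-minimal-initramfs"
#   INITRAMFS_IMAGE_BUNDLE = "1"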
41
42# KERNEL_VERSION is extracted from the source code. It is evaluated as
43# None on the first parse, since the code has not been fetched yet.
44# After the code is fetched, it will be evaluated as the real version
45# number and cause the kernel to be rebuilt. To avoid this, make
46# KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on
47# LINUX_VERSION which is a constant.
48KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}"
49KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}"
50KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
51KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
52
53python __anonymous () {
54 pn = d.getVar("PN")
55 kpn = d.getVar("KERNEL_PACKAGE_NAME")
56
57 # XXX Remove this after bug 11905 is resolved
58 # FILES:${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
59 if kpn == pn:
60	        bb.warn("Some packages (e.g. *-dev) might be missing due to "
61 "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
62
63 # The default kernel recipe builds in a shared location defined by
64 # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR.
65 # Set these variables to directories under ${WORKDIR} in alternate
66	    # kernel recipes (i.e. where KERNEL_PACKAGE_NAME != kernel) so that they
67 # may build in parallel with the default kernel without clobbering.
68 if kpn != "kernel":
69 workdir = d.getVar("WORKDIR")
70 sourceDir = os.path.join(workdir, 'kernel-source')
71 artifactsDir = os.path.join(workdir, 'kernel-build-artifacts')
72 d.setVar("STAGING_KERNEL_DIR", sourceDir)
73 d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir)
74
75 # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
76 type = d.getVar('KERNEL_IMAGETYPE') or ""
77 alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
78 types = d.getVar('KERNEL_IMAGETYPES') or ""
79 if type not in types.split():
80 types = (type + ' ' + types).strip()
81 if alttype not in types.split():
82 types = (alttype + ' ' + types).strip()
83 d.setVar('KERNEL_IMAGETYPES', types)
84
85 # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
86 # by the kernel build system and types which are created by post-processing
87 # the output of the kernel build system (e.g. compressing vmlinux ->
88 # vmlinux.gz in kernel_do_transform_kernel()).
89 # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
90 # directly by the kernel build system.
91 if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
92 typeformake = set()
93 for type in types.split():
94 if type == 'vmlinux.gz':
95 type = 'vmlinux'
96 typeformake.add(type)
97
98 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake)))
99
100 kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
101 imagedest = d.getVar('KERNEL_IMAGEDEST')
102
103 for type in types.split():
104 if bb.data.inherits_class('nopackages', d):
105 continue
106 typelower = type.lower()
107 d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
108 d.setVar('FILES:' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
109 d.appendVar('RDEPENDS:%s-image' % kname, ' %s-image-%s (= ${EXTENDPKGV})' % (kname, typelower))
110 splitmods = d.getVar("KERNEL_SPLIT_MODULES")
111 if splitmods != '1':
112 d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname)
113 d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname)
114 d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
115 d.appendVar('RPROVIDES:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
116
117 d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
118 d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
119 d.prependVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
120if [ -n "$D" ]; then
121 ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
122else
123 ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
124 if [ $? -ne 0 ]; then
125 echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)."
126 install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s
127 fi
128fi
129set -e
130""" % (type, type, type, type, type, type, type))
131 d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e
132if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
133 rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
134fi
135set -e
136""" % (type, type, type))
137
138
139 image = d.getVar('INITRAMFS_IMAGE')
140	    # If INITRAMFS_IMAGE is set but INITRAMFS_IMAGE_BUNDLE is set to 0,
141	    # do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
142 # standalone for use by wic and other tools.
143 if image:
144 if d.getVar('INITRAMFS_MULTICONFIG'):
145 d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc::${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
146 else:
147 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
148 if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
149 bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
150
151 # NOTE: setting INITRAMFS_TASK is for backward compatibility
152 # The preferred method is to set INITRAMFS_IMAGE, because
153 # this INITRAMFS_TASK has circular dependency problems
154 # if the initramfs requires kernel modules
155 image_task = d.getVar('INITRAMFS_TASK')
156 if image_task:
157 d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
158}
159
160# Here we pull in all various kernel image types which we support.
161#
162# In case you're wondering why kernel.bbclass inherits the other image
163# types instead of the other way around, the reason for that is to
164# maintain compatibility with various currently existing meta-layers.
165# By pulling in the various kernel image types here, we retain the
166# original behavior of kernel.bbclass, so no meta-layers should get
167# broken.
168#
169# KERNEL_CLASSES by default pulls in kernel-uimage.bbclass, since this
170# used to be the default behavior when only uImage was supported. This
171# variable can be appended by users who implement support for new kernel
172# image types.
173
174KERNEL_CLASSES ?= " kernel-uimage "
175inherit ${KERNEL_CLASSES}
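# e.g. a BSP wanting FIT images in addition could append (illustrative):
#   KERNEL_CLASSES:append = " kernel-fitimage"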
176
177# Old style kernels may set ${S} = ${WORKDIR}/git for example
178# We need to move these over to STAGING_KERNEL_DIR. We can't just
179# create the symlink in advance as the git fetcher can't cope with
180# the symlink.
181do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
182do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
183python do_symlink_kernsrc () {
184 s = d.getVar("S")
185 if s[-1] == '/':
186 # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
187 s=s[:-1]
188 kernsrc = d.getVar("STAGING_KERNEL_DIR")
189 if s != kernsrc:
190 bb.utils.mkdirhier(kernsrc)
191 bb.utils.remove(kernsrc, recurse=True)
192 if d.getVar("EXTERNALSRC"):
193 # With EXTERNALSRC S will not be wiped so we can symlink to it
194 os.symlink(s, kernsrc)
195 else:
196 import shutil
197 shutil.move(s, kernsrc)
198 os.symlink(kernsrc, s)
199}
200# do_patch is normally ordered before do_configure, but
201# externalsrc.bbclass deletes do_patch, breaking the dependency of
202# do_configure on do_symlink_kernsrc.
203addtask symlink_kernsrc before do_patch do_configure after do_unpack
204
205inherit kernel-arch deploy
206
207PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*"
208PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*"
209PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
210
211export OS = "${TARGET_OS}"
212export CROSS_COMPILE = "${TARGET_PREFIX}"
213export KBUILD_BUILD_VERSION = "1"
214export KBUILD_BUILD_USER ?= "oe-user"
215export KBUILD_BUILD_HOST ?= "oe-host"
216
217KERNEL_RELEASE ?= "${KERNEL_VERSION}"
218
219# The directory where the built kernel lies in the kernel tree
220KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
221KERNEL_IMAGEDEST ?= "boot"
222
223#
224# configuration
225#
226export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"
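# KERNEL_CONSOLE would typically come from the machine configuration, e.g.
# (illustrative value): KERNEL_CONSOLE = "ttyAMA0,115200"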
227
228KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
229
230# kernels are generally machine specific
231PACKAGE_ARCH = "${MACHINE_ARCH}"
232
233# U-Boot support
234UBOOT_ENTRYPOINT ?= "20008000"
235UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
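# Machines booting via U-Boot usually override these in their machine conf,
# e.g. (illustrative value): UBOOT_ENTRYPOINT = "0x80008000"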
236
237# Some Linux kernel configurations need additional parameters on the command line
238KERNEL_EXTRA_ARGS ?= ""
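# e.g. (illustrative) to pass extra arguments to the kernel's make invocation:
#   KERNEL_EXTRA_ARGS = "W=1"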
239
240EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
241EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}" PAHOLE=false"
242
243KERNEL_ALT_IMAGETYPE ??= ""
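# Given the merging done in the anonymous python above, a sketch such as
#   KERNEL_IMAGETYPE = "zImage"
#   KERNEL_ALT_IMAGETYPE = "vmlinux.gz"
# yields KERNEL_IMAGETYPES containing both, with vmlinux.gz created by
# kernel_do_transform_kernel() rather than by the kernel build itself.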
244
245copy_initramfs() {
246 echo "Copying initramfs into ./usr ..."
247 # In case the directory is not created yet from the first pass compile:
248 mkdir -p ${B}/usr
249 # Find and use the first initramfs image archive type we find
250 rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
251 for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst; do
252 if [ -e "${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
253 cp ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
254 case $img in
255 *gz)
256 echo "gzip decompressing image"
257 gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
258 break
259 ;;
260 *lz4)
261 echo "lz4 decompressing image"
262 lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
263 break
264 ;;
265 *lzo)
266 echo "lzo decompressing image"
267 lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
268 break
269 ;;
270 *lzma)
271 echo "lzma decompressing image"
272 lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
273 break
274 ;;
275 *xz)
276 echo "xz decompressing image"
277 xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
278 break
279 ;;
280 *zst)
281 echo "zst decompressing image"
282 zstd -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
283 break
284 ;;
285 esac
286 break
287 fi
288 done
289	# Verify that the above loop found an initramfs, fail otherwise
290	[ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz|.zst} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
291}
292
293do_bundle_initramfs () {
294 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
295 echo "Creating a kernel image with a bundled initramfs..."
296 copy_initramfs
297	# Backing up the kernel image relies on its type (regular file or symbolic link)
298 tmp_path=""
299 for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
300 if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then
301 linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType`
302 realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType`
303 mv -f $realpath $realpath.bak
304 tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath
305 elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then
306 mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak
307 tmp_path=$tmp_path" "$imageType"##"
308 fi
309 done
310 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
311 kernel_do_compile
312 # Restoring kernel image
313 for tp in $tmp_path ; do
314 imageType=`echo $tp|cut -d "#" -f 1`
315 linkpath=`echo $tp|cut -d "#" -f 2`
316 realpath=`echo $tp|cut -d "#" -f 3`
317 if [ -n "$realpath" ]; then
318 mv -f $realpath $realpath.initramfs
319 mv -f $realpath.bak $realpath
320 ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs
321 else
322 mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs
323 mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType
324 fi
325 done
326 fi
327}
328do_bundle_initramfs[dirs] = "${B}"
329
330kernel_do_transform_bundled_initramfs() {
331 # vmlinux.gz is not built by kernel
332 if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
333 gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
334 fi
335}
336do_transform_bundled_initramfs[dirs] = "${B}"
337
338python do_devshell:prepend () {
339 os.environ["LDFLAGS"] = ''
340}
341
342addtask bundle_initramfs after do_install before do_deploy
343
344KERNEL_DEBUG_TIMESTAMPS ??= "0"
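# Set KERNEL_DEBUG_TIMESTAMPS = "1" (e.g. in local.conf) to keep real build
# timestamps, at the cost of reproducible builds.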
345
346kernel_do_compile() {
347 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
348
349	# setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
350 export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
351 export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
352 export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
353 export PKG_CONFIG_SYSROOT_DIR=""
354
355 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
356 # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
357 # be set....
358 if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
359 # The source directory is not necessarily a git repository, so we
360 # specify the git-dir to ensure that git does not query a
361 # repository in any parent directory.
362 SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
363 fi
364
365 ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
366 export KBUILD_BUILD_TIMESTAMP="$ts"
367 export KCONFIG_NOTIMESTAMP=1
368 bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
369 fi
370 # The $use_alternate_initrd is only set from
371	# do_bundle_initramfs(). This variable is specifically for the
372 # case where we are making a second pass at the kernel
373 # compilation and we want to force the kernel build to use a
374 # different initramfs image. The way to do that in the kernel
375 # is to specify:
376 # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
377 if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
378	# The old style way of copying a prebuilt image and building it
379	# is turned on via INITRAMFS_TASK != ""
380 copy_initramfs
381 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
382 fi
383 for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
384 oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
385 done
386}
387
388kernel_do_transform_kernel() {
389 # vmlinux.gz is not built by kernel
390 if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
391 mkdir -p "${KERNEL_OUTPUT_DIR}"
392 gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
393 fi
394}
395do_transform_kernel[dirs] = "${B}"
396addtask transform_kernel after do_compile before do_install
397
398do_compile_kernelmodules() {
399 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
400 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
401 # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
402 # be set....
403 if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
404 # The source directory is not necessarily a git repository, so we
405 # specify the git-dir to ensure that git does not query a
406 # repository in any parent directory.
407 SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
408 fi
409
410 ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
411 export KBUILD_BUILD_TIMESTAMP="$ts"
412 export KCONFIG_NOTIMESTAMP=1
413 bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
414 fi
415 if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
416 oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
417
418 # Module.symvers gets updated during the
419 # building of the kernel modules. We need to
420 # update this in the shared workdir since some
421	# external kernel modules have a dependency on
422 # other kernel modules and will look at this
423 # file to do symbol lookups
424 cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
425 # 5.10+ kernels have module.lds that we need to copy for external module builds
426 if [ -e "${B}/scripts/module.lds" ]; then
427 install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds
428 fi
429 else
430 bbnote "no modules to compile"
431 fi
432}
433addtask compile_kernelmodules after do_compile before do_strip
434
435kernel_do_install() {
436 #
437 # First install the modules
438 #
439 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
440 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
441 oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
442 rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
443 rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
444 # If the kernel/ directory is empty remove it to prevent QA issues
445 rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
446 else
447 bbnote "no modules to install"
448 fi
449
450 #
451 # Install various kernel output (zImage, map file, config, module support files)
452 #
453 install -d ${D}/${KERNEL_IMAGEDEST}
454
455 #
456 # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
457 # by do_assemble_fitimage_initramfs.
458 # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
459	# So, at the level of the install task, we should not try to install the fitImage, because it has
460	# not been generated yet.
461 # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
462 # the deploy folder.
463 #
464
465 for imageType in ${KERNEL_IMAGETYPES} ; do
466 if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
467 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
468 fi
469 done
470
471 install -m 0644 System.map ${D}/${KERNEL_IMAGEDEST}/System.map-${KERNEL_VERSION}
472 install -m 0644 .config ${D}/${KERNEL_IMAGEDEST}/config-${KERNEL_VERSION}
473 install -m 0644 vmlinux ${D}/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION}
474 [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/${KERNEL_IMAGEDEST}/Module.symvers-${KERNEL_VERSION}
475 install -d ${D}${sysconfdir}/modules-load.d
476 install -d ${D}${sysconfdir}/modprobe.d
477}
478
479# Must be run no earlier than do_kernel_checkout, or else the Makefile won't be present at ${S}/Makefile
480do_kernel_version_sanity_check() {
481 if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then
482 exit 0
483 fi
484
485 # The Makefile determines the kernel version shown at runtime
486 # Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile
487 VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
488 PATCHLEVEL=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
489 SUBLEVEL=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
490 EXTRAVERSION=$(grep "^EXTRAVERSION =" ${S}/Makefile | sed s/.*=\ *//)
491
492 # Build a string for regex and a plain version string
493 reg="^${VERSION}\.${PATCHLEVEL}"
494 vers="${VERSION}.${PATCHLEVEL}"
495 if [ -n "${SUBLEVEL}" ]; then
496 # Ignoring a SUBLEVEL of zero is fine
497 if [ "${SUBLEVEL}" = "0" ]; then
498 reg="${reg}(\.${SUBLEVEL})?"
499 else
500 reg="${reg}\.${SUBLEVEL}"
501 vers="${vers}.${SUBLEVEL}"
502 fi
503 fi
504 vers="${vers}${EXTRAVERSION}"
505 reg="${reg}${EXTRAVERSION}"
506
507 if [ -z `echo ${PV} | grep -E "${reg}"` ]; then
508	        bbfatal "Package Version (${PV}) does not match the version of the kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe."
509 fi
510 exit 0
511}
512
513addtask shared_workdir after do_compile before do_compile_kernelmodules
514addtask shared_workdir_setscene
515
516do_shared_workdir_setscene () {
517 exit 1
518}
519
520emit_depmod_pkgdata() {
521 # Stash data for depmod
522 install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/
523 echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion
524 cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION}
525}
526
527PACKAGEFUNCS += "emit_depmod_pkgdata"
528
529do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}"
530do_shared_workdir () {
531 cd ${B}
532
533 kerneldir=${STAGING_KERNEL_BUILDDIR}
534 install -d $kerneldir
535
536 #
537 # Store the kernel version in sysroots for module-base.bbclass
538 #
539
540 echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
541
542 # Copy files required for module builds
543 cp System.map $kerneldir/System.map-${KERNEL_VERSION}
544 [ -e Module.symvers ] && cp Module.symvers $kerneldir/
545 cp .config $kerneldir/
546 mkdir -p $kerneldir/include/config
547 cp include/config/kernel.release $kerneldir/include/config/kernel.release
548 if [ -e certs/signing_key.x509 ]; then
549 # The signing_key.* files are stored in the certs/ dir in
550 # newer Linux kernels
551 mkdir -p $kerneldir/certs
552 cp certs/signing_key.* $kerneldir/certs/
553 elif [ -e signing_key.priv ]; then
554 cp signing_key.* $kerneldir/
555 fi
556
557 # We can also copy over all the generated files and avoid special cases
558 # like version.h, but we've opted to keep this small until file creep starts
559 # to happen
560 if [ -e include/linux/version.h ]; then
561 mkdir -p $kerneldir/include/linux
562 cp include/linux/version.h $kerneldir/include/linux/version.h
563 fi
564
565 # As of Linux kernel version 3.0.1, the clean target removes
566 # arch/powerpc/lib/crtsavres.o which is present in
567 # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
568 if [ ${ARCH} = "powerpc" ]; then
569 if [ -e arch/powerpc/lib/crtsavres.o ]; then
570 mkdir -p $kerneldir/arch/powerpc/lib/
571 cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
572 fi
573 fi
574
575 if [ -d include/generated ]; then
576 mkdir -p $kerneldir/include/generated/
577 cp -fR include/generated/* $kerneldir/include/generated/
578 fi
579
580 if [ -d arch/${ARCH}/include/generated ]; then
581 mkdir -p $kerneldir/arch/${ARCH}/include/generated/
582 cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
583 fi
584
585 if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
586 # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
587 # out-of-tree modules to be able to generate object files.
588 if [ -x tools/objtool/objtool ]; then
589 mkdir -p ${kerneldir}/tools/objtool
590 cp tools/objtool/objtool ${kerneldir}/tools/objtool/
591 fi
592 fi
593}
594
595# We don't need to stage anything, in particular not the modules/firmware, since those would clash with linux-firmware
596sysroot_stage_all () {
597 :
598}
599
600KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
601
602python check_oldest_kernel() {
603 oldest_kernel = d.getVar('OLDEST_KERNEL')
604 kernel_version = d.getVar('KERNEL_VERSION')
605 tclibc = d.getVar('TCLIBC')
606 if tclibc == 'glibc':
607 kernel_version = kernel_version.split('-', 1)[0]
608 if oldest_kernel and kernel_version:
609 if bb.utils.vercmp_string(kernel_version, oldest_kernel) < 0:
610 bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest_kernel, kernel_version, tclibc))
611}
612
613check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
614do_configure[prefuncs] += "check_oldest_kernel"
615
616kernel_do_configure() {
617 # fixes extra + in /lib/modules/2.6.37+
618 # $ scripts/setlocalversion . => +
619 # $ make kernelversion => 2.6.37
620 # $ make kernelrelease => 2.6.37+
621 touch ${B}/.scmversion ${S}/.scmversion
622
623 if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
624 mv "${S}/.config" "${B}/.config"
625 fi
626
627 # Copy defconfig to .config if .config does not exist. This allows
628 # recipes to manage the .config themselves in do_configure:prepend().
629 if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
630 cp "${WORKDIR}/defconfig" "${B}/.config"
631 fi
632
633 ${KERNEL_CONFIG_COMMAND}
634}
635
636do_savedefconfig() {
637 bbplain "Saving defconfig to:\n${B}/defconfig"
638 oe_runmake -C ${B} LD='${KERNEL_LD}' savedefconfig
639}
640do_savedefconfig[nostamp] = "1"
641addtask savedefconfig after do_configure
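# Typical usage: bitbake virtual/kernel -c savedefconfig; the resulting
# ${B}/defconfig can then be added to a recipe's SRC_URI.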
642
643inherit cml1
644
645KCONFIG_CONFIG_COMMAND:append = " PAHOLE=false LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
646
647EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
648
649# kernel-base becomes kernel-${KERNEL_VERSION}
650# kernel-image becomes kernel-image-${KERNEL_VERSION}
651PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules ${KERNEL_PACKAGE_NAME}-dbg"
652FILES:${PN} = ""
653FILES:${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
654FILES:${KERNEL_PACKAGE_NAME}-image = ""
655FILES:${KERNEL_PACKAGE_NAME}-dev = "/${KERNEL_IMAGEDEST}/System.map* /${KERNEL_IMAGEDEST}/Module.symvers* /${KERNEL_IMAGEDEST}/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
656FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION_NAME}"
657FILES:${KERNEL_PACKAGE_NAME}-modules = ""
658FILES:${KERNEL_PACKAGE_NAME}-dbg = "/usr/lib/debug /usr/src/debug"
659RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
660# Allow machines to override this dependency if kernel image files are
661# not wanted in images as standard
662RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})"
663PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
664RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}"
665PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
666RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
667ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1"
668ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1"
669ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1"
670ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-modules = "1"
671DESCRIPTION:${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
672
673pkg_postinst:${KERNEL_PACKAGE_NAME}-base () {
674 if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
675 mkdir -p $D/lib/modules/${KERNEL_VERSION}
676 fi
677 if [ -n "$D" ]; then
678 depmodwrapper -a -b $D ${KERNEL_VERSION}
679 else
680 depmod -a ${KERNEL_VERSION}
681 fi
682}
683
684PACKAGESPLITFUNCS:prepend = "split_kernel_packages "
685
686python split_kernel_packages () {
687 do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
688}
689
690# Many scripts want to look in arch/$arch/boot for the bootable
691# image. This poses a problem for vmlinux and vmlinuz based
692# booting. This task arranges to have vmlinux and vmlinuz appear
693# in the normalized directory location.
694do_kernel_link_images() {
695 if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
696 mkdir ${B}/arch/${ARCH}/boot
697 fi
698 cd ${B}/arch/${ARCH}/boot
699 ln -sf ../../../vmlinux
700 if [ -f ../../../vmlinuz ]; then
701 ln -sf ../../../vmlinuz
702 fi
703 if [ -f ../../../vmlinuz.bin ]; then
704 ln -sf ../../../vmlinuz.bin
705 fi
706 if [ -f ../../../vmlinux.64 ]; then
707 ln -sf ../../../vmlinux.64
708 fi
709}
710addtask kernel_link_images after do_compile before do_strip
711
712python do_strip() {
713 import shutil
714
715 strip = d.getVar('STRIP')
716 extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS')
717 kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux"
718
719 if (extra_sections and kernel_image.find(d.getVar('KERNEL_IMAGEDEST') + '/vmlinux') != -1):
720 kernel_image_stripped = kernel_image + ".stripped"
721 shutil.copy2(kernel_image, kernel_image_stripped)
722 oe.package.runstrip((kernel_image_stripped, 8, strip, extra_sections))
723 bb.debug(1, "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections: " + \
724 extra_sections)
725}
726do_strip[dirs] = "${B}"
727
728addtask strip before do_sizecheck after do_kernel_link_images
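# Stripping is only attempted for vmlinux; an illustrative setting:
#   KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment"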
729
730# Support checking the kernel size since some kernels need to reside in partitions
731# with a fixed length, or there is a limit on transferring the kernel to memory.
732# If more than one image type is enabled, warn on any that don't fit but only fail
733# if none fit.
734do_sizecheck() {
735 if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
736 invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
737 if [ -n "$invalid" ]; then
738 die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
739 fi
740 at_least_one_fits=
741 for imageType in ${KERNEL_IMAGETYPES} ; do
742 size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
743 if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then
744 bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
745 else
746 at_least_one_fits=y
747 fi
748 done
749 if [ -z "$at_least_one_fits" ]; then
750 die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular."
751 fi
752 fi
753}
754do_sizecheck[dirs] = "${B}"
755
756addtask sizecheck before do_install after do_strip
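# e.g. for a device with an 8 MB kernel partition (illustrative; the unit is
# kilobytes, per the check above): KERNEL_IMAGE_MAXSIZE = "8192"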
757
758inherit kernel-artifact-names
759
760kernel_do_deploy() {
761 deployDir="${DEPLOYDIR}"
762 if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
763 deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
764 mkdir "$deployDir"
765 fi
766
767 for imageType in ${KERNEL_IMAGETYPES} ; do
768 baseName=$imageType-${KERNEL_IMAGE_NAME}
769
770 if [ -s ${KERNEL_OUTPUT_DIR}/$imageType.stripped ] ; then
771 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.stripped $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
772 else
773 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
774 fi
775 if [ -n "${KERNEL_IMAGE_LINK_NAME}" ] ; then
776 ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
777 fi
778 if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
779 ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType
780 fi
781 done
782
783 if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
784 mkdir -p ${D}${root_prefix}/lib
785 if [ -n "${SOURCE_DATE_EPOCH}" ]; then
786 TAR_ARGS="--sort=name --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}"
787 else
788 TAR_ARGS=""
789 fi
790 TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
791 tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz
792
793 if [ -n "${MODULE_TARBALL_LINK_NAME}" ] ; then
794 ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
795 fi
796 fi
797
798 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
799 for imageType in ${KERNEL_IMAGETYPES} ; do
800 if [ "$imageType" = "fitImage" ] ; then
801 continue
802 fi
803 initramfsBaseName=$imageType-${INITRAMFS_NAME}
804 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT}
805 if [ -n "${INITRAMFS_LINK_NAME}" ] ; then
806 ln -sf $initramfsBaseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${INITRAMFS_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
807 fi
808 done
809 fi
810}
811
812# We deploy to filenames that include PKGV and PKGR, read the saved data to
813# ensure we get the right values for both
814do_deploy[prefuncs] += "read_subpackage_metadata"
815
816addtask deploy after do_populate_sysroot do_packagedata
817
818EXPORT_FUNCTIONS do_deploy
819
820# Add using Device Tree support
821inherit kernel-devicetree
diff --git a/meta/classes-recipe/kernelsrc.bbclass b/meta/classes-recipe/kernelsrc.bbclass
new file mode 100644
index 0000000000..a32882a5d2
--- /dev/null
+++ b/meta/classes-recipe/kernelsrc.bbclass
@@ -0,0 +1,16 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7S = "${STAGING_KERNEL_DIR}"
8deltask do_fetch
9deltask do_unpack
10do_patch[depends] += "virtual/kernel:do_shared_workdir"
11do_patch[noexec] = "1"
12do_package[depends] += "virtual/kernel:do_populate_sysroot"
13KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
14
15inherit linux-kernel-base
16
diff --git a/meta/classes-recipe/lib_package.bbclass b/meta/classes-recipe/lib_package.bbclass
new file mode 100644
index 0000000000..6d110155e5
--- /dev/null
+++ b/meta/classes-recipe/lib_package.bbclass
@@ -0,0 +1,12 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6#
7# ${PN}-bin is defined in bitbake.conf
8#
9# We need to allow the other packages to be greedy with what they
10# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
11#
12PACKAGE_BEFORE_PN = "${PN}-bin"
diff --git a/meta/classes-recipe/libc-package.bbclass b/meta/classes-recipe/libc-package.bbclass
new file mode 100644
index 0000000000..de3d4223a8
--- /dev/null
+++ b/meta/classes-recipe/libc-package.bbclass
@@ -0,0 +1,390 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
9# may need packaging and it's pointless to duplicate this code.
10#
11# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
12# "compile" - Use QEMU to generate the binary locale files
13# "precompiled" - The binary locale files are pregenerated and already present
14# "ondevice" - The device will build the locale files upon first boot through the postinst
15
16GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
17
18GLIBC_SPLIT_LC_PACKAGES ?= "0"
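# A distro enabling prebuilt binary locales might set (illustrative):
#   ENABLE_BINARY_LOCALE_GENERATION = "1"
#   GLIBC_SPLIT_LC_PACKAGES = "1"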
19
20python __anonymous () {
21 enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION")
22
23 pn = d.getVar("PN")
24 if pn.endswith("-initial"):
25 enabled = False
26
27 if enabled and int(enabled):
28 import re
29
30 target_arch = d.getVar("TARGET_ARCH")
31 binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or ""
32 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or ""
33
34 for regexp in binary_arches.split(" "):
35 r = re.compile(regexp)
36
37 if r.match(target_arch):
38 depends = d.getVar("DEPENDS")
39 if use_cross_localedef == "1" :
40 depends = "%s cross-localedef-native" % depends
41 else:
42 depends = "%s qemu-native" % depends
43 d.setVar("DEPENDS", depends)
44 d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
45 break
46}
47
48# try to fix the compile failure when charsets/locales/locale-code are disabled
49PACKAGE_NO_GCONV ?= "0"
50
51OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
52
53locale_base_postinst_ontarget() {
54localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
55}
56
57locale_base_postrm() {
58#!/bin/sh
59localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s %s
60}
61
62LOCALETREESRC ?= "${PKGD}"
63
64do_prep_locale_tree() {
65 treedir=${WORKDIR}/locale-tree
66 rm -rf $treedir
67 mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
68 tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
69	# gunzip the charmaps to avoid parsing errors
70 for i in $treedir/${datadir}/i18n/charmaps/*gz; do
71 gunzip $i
72 done
73 # The extract pattern "./l*.so*" is carefully selected so that it will
74 # match ld*.so and lib*.so*, but not any files in the gconv directory
75 # (if it exists). This makes sure we only unpack the files we need.
76 # This is important in case usrmerge is set in DISTRO_FEATURES, which
77 # means ${base_libdir} == ${libdir}.
78 tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
79 if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
80 tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
81 fi
82 install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
83}
84
85do_collect_bins_from_locale_tree() {
86 treedir=${WORKDIR}/locale-tree
87
88 parent=$(dirname ${localedir})
89 mkdir -p ${PKGD}/$parent
90 tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
91
92	# Finalize the tree by changing all duplicate files into hard links
93 cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
94}
95
96inherit qemu
97
98python package_do_split_gconvs () {
99 import re
100 if (d.getVar('PACKAGE_NO_GCONV') == '1'):
101 bb.note("package requested not splitting gconvs")
102 return
103
104 if not d.getVar('PACKAGES'):
105 return
106
107 mlprefix = d.getVar("MLPREFIX") or ""
108
109 bpn = d.getVar('BPN')
110 libdir = d.getVar('libdir')
111 if not libdir:
112 bb.error("libdir not defined")
113 return
114 datadir = d.getVar('datadir')
115 if not datadir:
116 bb.error("datadir not defined")
117 return
118
119 gconv_libdir = oe.path.join(libdir, "gconv")
120 charmap_dir = oe.path.join(datadir, "i18n", "charmaps")
121 locales_dir = oe.path.join(datadir, "i18n", "locales")
122 binary_locales_dir = d.getVar('localedir')
123
124 def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
125 deps = []
126 f = open(fn, "rb")
127 c_re = re.compile(r'^copy "(.*)"')
128 i_re = re.compile(r'^include "(\w+)".*')
129 for l in f.readlines():
130 l = l.decode("latin-1")
131 m = c_re.match(l) or i_re.match(l)
132 if m:
133 dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
134 if not dp in deps:
135 deps.append(dp)
136 f.close()
137 if deps != []:
138 d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
139 if bpn != 'glibc':
140 d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
141
142 do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
143 description='gconv module for character set %s', hook=calc_gconv_deps, \
144 extra_depends=bpn+'-gconv')
145
146 def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
147 deps = []
148 f = open(fn, "rb")
149 c_re = re.compile(r'^copy "(.*)"')
150 i_re = re.compile(r'^include "(\w+)".*')
151 for l in f.readlines():
152 l = l.decode("latin-1")
153 m = c_re.match(l) or i_re.match(l)
154 if m:
155 dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
156 if not dp in deps:
157 deps.append(dp)
158 f.close()
159 if deps != []:
160 d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
161 if bpn != 'glibc':
162 d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
163
164 do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
165 description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
166
167 def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
168 deps = []
169 f = open(fn, "rb")
170 c_re = re.compile(r'^copy "(.*)"')
171 i_re = re.compile(r'^include "(\w+)".*')
172 for l in f.readlines():
173 l = l.decode("latin-1")
174 m = c_re.match(l) or i_re.match(l)
175 if m:
176 dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
177 if not dp in deps:
178 deps.append(dp)
179 f.close()
180 if deps != []:
181 d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
182 if bpn != 'glibc':
183 d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
184
185 do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
186 description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
187 d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
188
189 use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")
190
191 dot_re = re.compile(r"(.*)\.(.*)")
192
193 # Read in supported locales and associated encodings
194 supported = {}
195 with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
196 for line in f.readlines():
197 try:
198 locale, charset = line.rstrip().split()
199 except ValueError:
200 continue
201 supported[locale] = charset
202
203	    # The GLIBC_GENERATE_LOCALES variable specifies which locales to generate. Empty or "all" means all locales
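    # e.g. (local.conf, illustrative): GLIBC_GENERATE_LOCALES = "en_GB.UTF-8 en_US.UTF-8"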
204 to_generate = d.getVar('GLIBC_GENERATE_LOCALES')
205 if not to_generate or to_generate == 'all':
206 to_generate = sorted(supported.keys())
207 else:
208 to_generate = to_generate.split()
209 for locale in to_generate:
210 if locale not in supported:
211 if '.' in locale:
212 charset = locale.split('.')[1]
213 else:
214 charset = 'UTF-8'
215 bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
216 supported[locale] = charset
217
218 def output_locale_source(name, pkgname, locale, encoding):
219 d.setVar('RDEPENDS:%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
220 (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
221 d.setVar('pkg_postinst_ontarget:%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
222 % (locale, encoding, locale))
223 d.setVar('pkg_postrm:%s' % pkgname, d.getVar('locale_base_postrm') % \
224 (locale, encoding, locale))
225
226 def output_locale_binary_rdepends(name, pkgname, locale, encoding):
227 dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name))
228 lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
229 if lcsplit and int(lcsplit):
230 d.appendVar('PACKAGES', ' ' + dep)
231 d.setVar('ALLOW_EMPTY:%s' % dep, '1')
232 d.setVar('RDEPENDS:%s' % pkgname, mlprefix + dep)
233
234 commands = {}
235
236 def output_locale_binary(name, pkgname, locale, encoding):
237 treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree")
238 ldlibdir = oe.path.join(treedir, d.getVar("base_libdir"))
239 path = d.getVar("PATH")
240 i18npath = oe.path.join(treedir, datadir, "i18n")
241 gconvpath = oe.path.join(treedir, "iconvdata")
242 outputpath = oe.path.join(treedir, binary_locales_dir)
243
244 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
245 if use_cross_localedef == "1":
246 target_arch = d.getVar('TARGET_ARCH')
247 locale_arch_options = { \
248 "arc": " --uint32-align=4 --little-endian ", \
249 "arceb": " --uint32-align=4 --big-endian ", \
250 "arm": " --uint32-align=4 --little-endian ", \
251 "armeb": " --uint32-align=4 --big-endian ", \
252 "aarch64": " --uint32-align=4 --little-endian ", \
253 "aarch64_be": " --uint32-align=4 --big-endian ", \
254 "sh4": " --uint32-align=4 --big-endian ", \
255 "powerpc": " --uint32-align=4 --big-endian ", \
256 "powerpc64": " --uint32-align=4 --big-endian ", \
257 "powerpc64le": " --uint32-align=4 --little-endian ", \
258 "mips": " --uint32-align=4 --big-endian ", \
259 "mipsisa32r6": " --uint32-align=4 --big-endian ", \
260 "mips64": " --uint32-align=4 --big-endian ", \
261 "mipsisa64r6": " --uint32-align=4 --big-endian ", \
262 "mipsel": " --uint32-align=4 --little-endian ", \
263 "mipsisa32r6el": " --uint32-align=4 --little-endian ", \
264 "mips64el":" --uint32-align=4 --little-endian ", \
265 "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
266 "riscv64": " --uint32-align=4 --little-endian ", \
267 "riscv32": " --uint32-align=4 --little-endian ", \
268 "i586": " --uint32-align=4 --little-endian ", \
269 "i686": " --uint32-align=4 --little-endian ", \
270 "x86_64": " --uint32-align=4 --little-endian " }
271
272 if target_arch in locale_arch_options:
273 localedef_opts = locale_arch_options[target_arch]
274 else:
275 bb.error("locale_arch_options not found for target_arch=" + target_arch)
276 bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
277
278 localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
279 --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
280 % (treedir, treedir, datadir, locale, encoding, outputpath, name)
281
282 cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
283 (path, i18npath, gconvpath, localedef_opts)
284 else: # earlier slower qemu way
285 qemu = qemu_target_binary(d)
286 localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
287 --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
288 % (treedir, datadir, locale, encoding, name)
289
290 qemu_options = d.getVar('QEMU_OPTIONS')
291
292 cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
293 -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
294 (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
295
296 commands["%s/%s" % (outputpath, name)] = cmd
297
298 bb.note("generating locale %s (%s)" % (locale, encoding))
299
300 def output_locale(name, locale, encoding):
301 pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
302 d.setVar('ALLOW_EMPTY:%s' % pkgname, '1')
303 d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
304 rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
305 m = re.match(r"(.*)_(.*)", name)
306 if m:
307 rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
308 d.setVar('RPROVIDES:%s' % pkgname, rprovides)
309
310 if use_bin == "compile":
311 output_locale_binary_rdepends(name, pkgname, locale, encoding)
312 output_locale_binary(name, pkgname, locale, encoding)
313 elif use_bin == "precompiled":
314 output_locale_binary_rdepends(name, pkgname, locale, encoding)
315 else:
316 output_locale_source(name, pkgname, locale, encoding)
317
318 if use_bin == "compile":
319 bb.note("preparing tree for binary locale generation")
320 bb.build.exec_func("do_prep_locale_tree", d)
321
322 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0)
323 utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0)
324
325 encodings = {}
326 for locale in to_generate:
327 charset = supported[locale]
328 if utf8_only and charset != 'UTF-8':
329 continue
330
331 m = dot_re.match(locale)
332 if m:
333 base = m.group(1)
334 else:
335 base = locale
336
337 # Non-precompiled locales may be renamed so that the default
338 # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and
339 # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly
340 # contradicts SUPPORTED.
341 if use_bin == "precompiled" or not utf8_is_default:
342 output_locale(locale, base, charset)
343 else:
344 if charset == 'UTF-8':
345 output_locale(base, base, charset)
346 else:
347 output_locale('%s.%s' % (base, charset), base, charset)
348
349 def metapkg_hook(file, pkg, pattern, format, basename):
350 name = basename.split('/', 1)[0]
351 metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
352 d.appendVar('RDEPENDS:%s' % metapkg, ' ' + pkg)
353
354 if use_bin == "compile":
355 makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
356 with open(makefile, "w") as m:
357 m.write("all: %s\n\n" % " ".join(commands.keys()))
358 total = len(commands)
359 for i, (maketarget, makerecipe) in enumerate(commands.items()):
360 m.write(maketarget + ":\n")
361 m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
362 m.write("\t" + makerecipe + "\n\n")
363 d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
364 d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
365 bb.note("Executing binary locale generation makefile")
366 bb.build.exec_func("oe_runmake", d)
367 bb.note("collecting binary locales from locale tree")
368 bb.build.exec_func("do_collect_bins_from_locale_tree", d)
369
370 if use_bin in ('compile', 'precompiled'):
371 lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
372 if lcsplit and int(lcsplit):
373 do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \
374 output_pattern=bpn+'-binary-localedata-%s', \
375 description='binary locale definition for %s', recursive=True,
376 hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
377 else:
378 do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \
379 output_pattern=bpn+'-binary-localedata-%s', \
380 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
381 else:
382 bb.note("generation of binary locales disabled. this may break i18n!")
383
384}
385
386# We want to do this indirection so that we can safely 'return'
387# from the called function even though we're prepending
388python populate_packages:prepend () {
389 bb.build.exec_func('package_do_split_gconvs', d)
390}
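
The indirection above matters because a prepend is inlined into populate_packages itself: a bare "return" there would end the whole populate_packages run rather than just the gconv splitting. A minimal sketch of the same pattern, with hypothetical function and variable names:

    python my_split_hook () {
        if not d.getVar('MY_SPLIT_ENABLED'):
            return  # only leaves my_split_hook; populate_packages continues
        bb.note("package-splitting logic goes here")
    }

    python populate_packages:prepend () {
        # exec_func gives the hook its own function scope to return from
        bb.build.exec_func('my_split_hook', d)
    }
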
diff --git a/meta/classes-recipe/license_image.bbclass b/meta/classes-recipe/license_image.bbclass
new file mode 100644
index 0000000000..b60d6e44f4
--- /dev/null
+++ b/meta/classes-recipe/license_image.bbclass
@@ -0,0 +1,295 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses"
8
9# This requires LICENSE_CREATE_PACKAGE=1 to work too
10COMPLEMENTARY_GLOB[lic-pkgs] = "*-lic"
11
12python() {
13 if not oe.data.typed_value('LICENSE_CREATE_PACKAGE', d):
14 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
15 if 'lic-pkgs' in features:
16 bb.error("'lic-pkgs' in IMAGE_FEATURES but LICENSE_CREATE_PACKAGE not enabled to generate -lic packages")
17}
18
19python write_package_manifest() {
20 # Get list of installed packages
21 license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
22 bb.utils.mkdirhier(license_image_dir)
23 from oe.rootfs import image_list_installed_packages
24 from oe.utils import format_pkg_list
25
26 pkgs = image_list_installed_packages(d)
27 output = format_pkg_list(pkgs)
28 with open(os.path.join(license_image_dir, 'package.manifest'), "w+") as package_manifest:
29 package_manifest.write(output)
30}
31
32python license_create_manifest() {
33 import oe.packagedata
34 from oe.rootfs import image_list_installed_packages
35
36 build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
37 if build_images_from_feeds == "1":
38 return 0
39
40 pkg_dic = {}
41 for pkg in sorted(image_list_installed_packages(d)):
42 pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
43 'runtime-reverse', pkg)
44 pkg_name = os.path.basename(os.readlink(pkg_info))
45
46 pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
47 if "LICENSE" not in pkg_dic[pkg_name]:
48 pkg_lic_name = "LICENSE:" + pkg_name
49 pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
50
51 rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
52 d.getVar('IMAGE_NAME'), 'license.manifest')
53 write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
54}
55
56def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
57 import re
58 import stat
59
60 bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
61 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
62
63 exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
64 with open(license_manifest, "w") as license_file:
65 for pkg in sorted(pkg_dic):
66 remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
67 incompatible_licenses = incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"])
68 if incompatible_licenses:
69 bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses)))
70 else:
71 incompatible_licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
72 if incompatible_licenses:
73 oe.qa.handle_error('license-incompatible', "Including %s with incompatible license(s) %s into the image, because it has been allowed by exception list." %(pkg, ' '.join(incompatible_licenses)), d)
74 try:
75 (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
76 oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
77 remaining_bad_licenses, canonical_license, d)
78 except oe.license.LicenseError as exc:
79 bb.fatal('%s: %s' % (d.getVar('P'), exc))
80
81 if "IMAGE_MANIFEST" not in pkg_dic[pkg]:
82 # Rootfs manifest
83 license_file.write("PACKAGE NAME: %s\n" % pkg)
84 license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
85 license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
86 license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
87
88 # If the package doesn't contain any files (that is, its size is 0), its license
89 # isn't relevant as far as the final image is concerned, so doing a license check
90 # doesn't make much sense; skip it.
91 if pkg_dic[pkg]["PKGSIZE:%s" % pkg] == "0":
92 continue
93 else:
94 # Image manifest
95 license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
96 license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
97 license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
98 license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
99
100 for lic in pkg_dic[pkg]["LICENSES"]:
101 lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
102 pkg_dic[pkg]["PN"], "generic_%s" %
103 re.sub(r'\+', '', lic))
104 # Explicitly skip the CLOSED license, because it isn't generic
105 if lic == "CLOSED":
106 continue
107
108 if not os.path.exists(lic_file):
109 oe.qa.handle_error('license-file-missing',
110 "The license listed %s was not in the "\
111 "licenses collected for recipe %s"
112 % (lic, pkg_dic[pkg]["PN"]), d)
113 oe.qa.exit_if_errors(d)
114
115 # Two options here:
116 # - Just copy the manifest
117 # - Copy the manifest and the license directories
118 # With both options set we see a 0.5 MB increase in core-image-minimal
119 copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
120 copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
121 if rootfs and copy_lic_manifest == "1":
122 rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR')
123 bb.utils.mkdirhier(rootfs_license_dir)
124 rootfs_license_manifest = os.path.join(rootfs_license_dir,
125 os.path.split(license_manifest)[1])
126 if not os.path.exists(rootfs_license_manifest):
127 oe.path.copyhardlink(license_manifest, rootfs_license_manifest)
128
129 if copy_lic_dirs == "1":
130 for pkg in sorted(pkg_dic):
131 pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
132 bb.utils.mkdirhier(pkg_rootfs_license_dir)
133 pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
134 pkg_dic[pkg]["PN"])
135
136 pkg_manifest_licenses = [canonical_license(d, lic) \
137 for lic in pkg_dic[pkg]["LICENSES"]]
138
139 licenses = os.listdir(pkg_license_dir)
140 for lic in licenses:
141 pkg_license = os.path.join(pkg_license_dir, lic)
142 pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
143
144 if re.match(r"^generic_.*$", lic):
145 generic_lic = canonical_license(d,
146 re.search(r"^generic_(.*)$", lic).group(1))
147
148 # Do not copy the generic license into the package if it isn't
149 # declared in the package's LICENSES.
150 if not re.sub(r'\+$', '', generic_lic) in \
151 [re.sub(r'\+', '', lic) for lic in \
152 pkg_manifest_licenses]:
153 continue
154
155 if not oe.license.license_ok(generic_lic,
156 bad_licenses):
157 continue
158
159 # Make sure we use only canonical name for the license file
160 generic_lic_file = "generic_%s" % generic_lic
161 rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file)
162 if not os.path.exists(rootfs_license):
163 oe.path.copyhardlink(pkg_license, rootfs_license)
164
165 if not os.path.exists(pkg_rootfs_license):
166 os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license)
167 else:
168 if (not oe.license.license_ok(canonical_license(d,
169 lic), bad_licenses) or
170 os.path.exists(pkg_rootfs_license)):
171 continue
172
173 oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
174 # Fixup file ownership and permissions
175 for walkroot, dirs, files in os.walk(rootfs_license_dir):
176 for f in files:
177 p = os.path.join(walkroot, f)
178 os.lchown(p, 0, 0)
179 if not os.path.islink(p):
180 os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
181 for dir in dirs:
182 p = os.path.join(walkroot, dir)
183 os.lchown(p, 0, 0)
184 os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
185
186
187
188def license_deployed_manifest(d):
189 """
190 Write the license manifest for the deployed recipes.
191 The deployed recipes usually includes the bootloader
192 and extra files to boot the target.
193 """
194
195 dep_dic = {}
196 man_dic = {}
197 lic_dir = d.getVar("LICENSE_DIRECTORY")
198
199 dep_dic = get_deployed_dependencies(d)
200 for dep in dep_dic.keys():
201 man_dic[dep] = {}
202 # It is necessary to mark that this will be used for the image manifest
203 man_dic[dep]["IMAGE_MANIFEST"] = True
204 man_dic[dep]["PN"] = dep
205 man_dic[dep]["FILES"] = \
206 " ".join(get_deployed_files(dep_dic[dep]))
207 with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
208 for line in f.readlines():
209 key,val = line.split(": ", 1)
210 man_dic[dep][key] = val[:-1]
211
212 lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
213 d.getVar('IMAGE_NAME'))
214 bb.utils.mkdirhier(lic_manifest_dir)
215 image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
216 write_license_files(d, image_license_manifest, man_dic, rootfs=False)
217
218 link_name = d.getVar('IMAGE_LINK_NAME')
219 if link_name:
220 lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
221 link_name)
222 # remove old symlink
223 if os.path.islink(lic_manifest_symlink_dir):
224 os.unlink(lic_manifest_symlink_dir)
225
226 # create the image dir symlink
227 if lic_manifest_dir != lic_manifest_symlink_dir:
228 os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
229
230def get_deployed_dependencies(d):
231 """
232 Get all the deployed dependencies of an image
233 """
234
235 deploy = {}
236 # Get all the dependencies for the current task (rootfs).
237 taskdata = d.getVar("BB_TASKDEPDATA", False)
238 pn = d.getVar("PN", True)
239 depends = list(set([dep[0] for dep
240 in list(taskdata.values())
241 if not dep[0].endswith("-native") and not dep[0] == pn]))
242
243 # To verify what was deployed, check the rootfs dependencies against
244 # the SSTATE_MANIFESTS for the "deploy" task.
245 # The manifest file name contains the arch; because we are not running
246 # in the recipe's context, it is necessary to check every arch used.
247 sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
248 archs = list(set(d.getVar("SSTATE_ARCHS").split()))
249 for dep in depends:
250 for arch in archs:
251 sstate_manifest_file = os.path.join(sstate_manifest_dir,
252 "manifest-%s-%s.deploy" % (arch, dep))
253 if os.path.exists(sstate_manifest_file):
254 deploy[dep] = sstate_manifest_file
255 break
256
257 return deploy
258get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
259
260def get_deployed_files(man_file):
261 """
262 Get the files deployed from the sstate manifest
263 """
264
265 dep_files = []
266 excluded_files = []
267 with open(man_file, "r") as manifest:
268 all_files = manifest.read()
269 for f in all_files.splitlines():
270 if ((not (os.path.islink(f) or os.path.isdir(f))) and
271 not os.path.basename(f) in excluded_files):
272 dep_files.append(os.path.basename(f))
273 return dep_files
274
275ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest; license_create_manifest; "
276do_rootfs[recrdeptask] += "do_populate_lic"
277
278python do_populate_lic_deploy() {
279 license_deployed_manifest(d)
280 oe.qa.exit_if_errors(d)
281}
282
283addtask populate_lic_deploy before do_build after do_image_complete
284do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
285
286python license_qa_dead_symlink() {
287 import os
288
289 for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')):
290 for file in files:
291 full_path = root + "/" + file
292 if os.path.islink(full_path) and not os.path.exists(full_path):
293 bb.error("broken symlink: " + full_path)
294}
295IMAGE_QA_COMMANDS += "license_qa_dead_symlink"
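
As a usage sketch, an image build could opt in to the per-package license packaging checked by the anonymous Python above with a local.conf fragment along these lines (values illustrative; all variables shown are the ones this class reads, and COPY_LIC_DIRS only takes effect together with COPY_LIC_MANIFEST, as write_license_files shows):

    LICENSE_CREATE_PACKAGE = "1"
    IMAGE_FEATURES:append = " lic-pkgs"
    # optionally also hardlink manifests and license texts into the rootfs
    COPY_LIC_MANIFEST = "1"
    COPY_LIC_DIRS = "1"
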
diff --git a/meta/classes-recipe/linux-dummy.bbclass b/meta/classes-recipe/linux-dummy.bbclass
new file mode 100644
index 0000000000..9291533cf9
--- /dev/null
+++ b/meta/classes-recipe/linux-dummy.bbclass
@@ -0,0 +1,31 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python __anonymous () {
8 if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy':
9 # copy parts of the code from kernel.bbclass
10 kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
11
12 # set up an empty kernel-devicetree package
13 d.appendVar('PACKAGES', ' %s-devicetree' % kname)
14 d.setVar('ALLOW_EMPTY:%s-devicetree' % kname, '1')
15
16 # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
17 type = d.getVar('KERNEL_IMAGETYPE') or ""
18 alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
19 types = d.getVar('KERNEL_IMAGETYPES') or ""
20 if type not in types.split():
21 types = (type + ' ' + types).strip()
22 if alttype not in types.split():
23 types = (alttype + ' ' + types).strip()
24
25 # set up empty kernel-image-* packages
26 for type in types.split():
27 typelower = type.lower()
28 d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
29 d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
30}
31
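
A sketch of how this class is brought in: select linux-dummy as the kernel provider in a machine or local configuration, and the anonymous Python above then registers empty kernel-devicetree and kernel-image-* packages so image-level dependencies still resolve (the image type value is illustrative):

    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
    KERNEL_IMAGETYPE = "zImage"   # yields an empty kernel-image-zimage package
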
diff --git a/meta/classes-recipe/linux-kernel-base.bbclass b/meta/classes-recipe/linux-kernel-base.bbclass
new file mode 100644
index 0000000000..cb2212c948
--- /dev/null
+++ b/meta/classes-recipe/linux-kernel-base.bbclass
@@ -0,0 +1,47 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# parse kernel ABI version out of <linux/version.h>
8def get_kernelversion_headers(p):
9 import re
10
11 fn = p + '/include/linux/utsrelease.h'
12 if not os.path.isfile(fn):
13 # after 2.6.33-rc1
14 fn = p + '/include/generated/utsrelease.h'
15 if not os.path.isfile(fn):
16 fn = p + '/include/linux/version.h'
17
18 try:
19 f = open(fn, 'r')
20 except IOError:
21 return None
22
23 l = f.readlines()
24 f.close()
25 r = re.compile(r'#define UTS_RELEASE "(.*)"')
26 for s in l:
27 m = r.match(s)
28 if m:
29 return m.group(1)
30 return None
31
32
33def get_kernelversion_file(p):
34 fn = p + '/kernel-abiversion'
35
36 try:
37 with open(fn, 'r') as f:
38 return f.readlines()[0].strip()
39 except IOError:
40 return None
41
42def linux_module_packages(s, d):
43 suffix = ""
44 return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
45
46# that's all
47
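
A worked example of the linux_module_packages name mapping, with hypothetical module names (the d parameter is part of the interface but unused here):

    >>> linux_module_packages("ath9k snd_usb_audio", None)
    'kernel-module-ath9k kernel-module-snd-usb-audio'

Lower-casing plus the '_' to '-' and '@' to '+' substitutions keep the generated names valid as package names.
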
diff --git a/meta/classes-recipe/linuxloader.bbclass b/meta/classes-recipe/linuxloader.bbclass
new file mode 100644
index 0000000000..1dfb95e31d
--- /dev/null
+++ b/meta/classes-recipe/linuxloader.bbclass
@@ -0,0 +1,82 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def get_musl_loader_arch(d):
8 import re
9 ldso_arch = "NotSupported"
10
11 targetarch = d.getVar("TARGET_ARCH")
12 if targetarch.startswith("microblaze"):
13 ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}"
14 elif targetarch.startswith("mips"):
15 ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
16 elif targetarch == "powerpc":
17 ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
18 elif targetarch.startswith("powerpc64"):
19 ldso_arch = "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}"
20 elif targetarch == "x86_64":
21 ldso_arch = "x86_64"
22 elif re.search("i.86", targetarch):
23 ldso_arch = "i386"
24 elif targetarch.startswith("arm"):
25 ldso_arch = "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}"
26 elif targetarch.startswith("aarch64"):
27 ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
28 elif targetarch.startswith("riscv64"):
29 ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
30 elif targetarch.startswith("riscv32"):
31 ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
32 return ldso_arch
33
34def get_musl_loader(d):
35 import re
36 return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1"
37
38def get_glibc_loader(d):
39 import re
40
41 dynamic_loader = "NotSupported"
42 targetarch = d.getVar("TARGET_ARCH")
43 if targetarch in ["powerpc", "microblaze"]:
44 dynamic_loader = "${base_libdir}/ld.so.1"
45 elif targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]:
46 dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
47 elif targetarch.startswith("mips"):
48 dynamic_loader = "${base_libdir}/ld.so.1"
49 elif targetarch == "powerpc64le":
50 dynamic_loader = "${base_libdir}/ld64.so.2"
51 elif targetarch == "powerpc64":
52 dynamic_loader = "${base_libdir}/ld64.so.1"
53 elif targetarch == "x86_64":
54 dynamic_loader = "${base_libdir}/ld-linux-x86-64.so.2"
55 elif re.search("i.86", targetarch):
56 dynamic_loader = "${base_libdir}/ld-linux.so.2"
57 elif targetarch == "arm":
58 dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3"
59 elif targetarch.startswith("aarch64"):
60 dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
61 elif targetarch.startswith("riscv64"):
62 dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
63 elif targetarch.startswith("riscv32"):
64 dynamic_loader = "${base_libdir}/ld-linux-riscv32-ilp32${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
65 return dynamic_loader
66
67def get_linuxloader(d):
68 overrides = d.getVar("OVERRIDES").split(":")
69
70 if "libc-baremetal" in overrides:
71 return "NotSupported"
72
73 if "libc-musl" in overrides:
74 dynamic_loader = get_musl_loader(d)
75 else:
76 dynamic_loader = get_glibc_loader(d)
77 return dynamic_loader
78
79get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
80get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
81get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}"
82get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
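
A sketch of how a recipe might consume these helpers, for example to substitute the loader path into an installed script (the @LOADER@ placeholder and script name are hypothetical); the vardepvalue flags above ensure task signatures track the computed value rather than the function bodies:

    do_install:append () {
        sed -i 's|@LOADER@|${@get_linuxloader(d)}|g' ${D}${bindir}/example-launcher
    }
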
diff --git a/meta/classes-recipe/live-vm-common.bbclass b/meta/classes-recipe/live-vm-common.bbclass
new file mode 100644
index 0000000000..b619f3a4be
--- /dev/null
+++ b/meta/classes-recipe/live-vm-common.bbclass
@@ -0,0 +1,100 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Some of the variables for the vm and live images conflict; this function
8# is used to resolve the problem.
9def set_live_vm_vars(d, suffix):
10 vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
11 for var in vars:
12 var_with_suffix = var + '_' + suffix
13 if d.getVar(var):
14 bb.warn('Found potentially conflicting variable %s, please use %s rather than %s' % \
15 (var, var_with_suffix, var))
16 elif d.getVar(var_with_suffix):
17 d.setVar(var, d.getVar(var_with_suffix))
18
19
20EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
21EFI_PROVIDER ?= "grub-efi"
22EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
23
24MKDOSFS_EXTRAOPTS ??= "-S 512"
25
26# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
27# contain "efi". This way legacy is supported by default if neither is
28# specified, maintaining the original behavior.
29def pcbios(d):
30 pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
31 if pcbios == "0":
32 pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
33 return pcbios
34
35PCBIOS = "${@pcbios(d)}"
36PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
37
38# efi_populate_common DEST BOOTLOADER
39efi_populate_common() {
40 # DEST must be the root of the image so that EFIDIR is not
41 # nested under a top level directory.
42 DEST=$1
43
44 install -d ${DEST}${EFIDIR}
45
46 install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
47 EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
48 printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
49}
50
51efi_iso_populate() {
52 iso_dir=$1
53 efi_populate $iso_dir
54 # Build an EFI directory to create efi.img
55 mkdir -p ${EFIIMGDIR}/${EFIDIR}
56 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
57 cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
58
59 EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
60 printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
61
62 if [ -f "$iso_dir/initrd" ] ; then
63 cp $iso_dir/initrd ${EFIIMGDIR}
64 fi
65}
66
67efi_hddimg_populate() {
68 efi_populate $1
69}
70
71inherit ${EFI_CLASS}
72inherit ${PCBIOS_CLASS}
73
74populate_kernel() {
75 dest=$1
76 install -d $dest
77
78 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
79 bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}"
80 if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
81 install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE}
82 else
83 bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist"
84 fi
85
86 # initrd is made of concatenation of multiple filesystem images
87 if [ -n "${INITRD}" ]; then
88 rm -f $dest/initrd
89 for fs in ${INITRD}
90 do
91 if [ -s "$fs" ]; then
92 cat $fs >> $dest/initrd
93 else
94 bbfatal "$fs is invalid. initrd image creation failed."
95 fi
96 done
97 chmod 0644 $dest/initrd
98 fi
99}
100
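
As a sketch of the suffix convention handled by set_live_vm_vars, a recipe can keep live- and vm-specific values apart with suffixed variables, and the consuming class then selects one set (the suffix and paths below are illustrative):

    SYSLINUX_CFG_LIVE = "${S}/syslinux_live.cfg"
    SYSLINUX_CFG_VM = "${S}/syslinux_vm.cfg"

    python () {
        # a live image class would pick the _LIVE values
        set_live_vm_vars(d, 'LIVE')
    }
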
diff --git a/meta/classes-recipe/manpages.bbclass b/meta/classes-recipe/manpages.bbclass
new file mode 100644
index 0000000000..693fb53671
--- /dev/null
+++ b/meta/classes-recipe/manpages.bbclass
@@ -0,0 +1,51 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Inherit this class to enable or disable building and installation of manpages
8# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
9# tends to pull in the entire XML stack and other tools, so it's not enabled
10# by default.
11PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
12
13inherit qemu
14
15 # manual files are usually packaged in ${PN}-doc, except for man-pages
16MAN_PKG ?= "${PN}-doc"
17
18# only add man-db to RDEPENDS when manual files are built and installed
19RDEPENDS:${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
20
21pkg_postinst:${MAN_PKG}:append () {
22 # only update manual page index caches when manual files are built and installed
23 if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
24 if test -n "$D"; then
25 if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
26 sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
27 chown -R root:root $D${mandir}
28
29 mkdir -p $D${localstatedir}/cache/man
30 cd $D${mandir}
31 find . -name index.db | while read index; do
32 mkdir -p $D${localstatedir}/cache/man/$(dirname ${index})
33 mv ${index} $D${localstatedir}/cache/man/${index}
34 chown man:man $D${localstatedir}/cache/man/${index}
35 done
36 cd -
37 else
38 $INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
39 fi
40 else
41 mandb -q
42 fi
43 fi
44}
45
46pkg_postrm:${MAN_PKG}:append () {
47 # only update manual page index caches when manual files are built and installed
48 if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
49 mandb -q
50 fi
51}
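
A recipe inheriting this class is expected to define what the 'manpages' PACKAGECONFIG toggles; a common shape, with configure flags and native dependencies that are assumptions here, is:

    inherit manpages
    PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native"
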
diff --git a/meta/classes-recipe/meson-routines.bbclass b/meta/classes-recipe/meson-routines.bbclass
new file mode 100644
index 0000000000..6086fce9d9
--- /dev/null
+++ b/meta/classes-recipe/meson-routines.bbclass
@@ -0,0 +1,57 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit siteinfo
8
9def meson_array(var, d):
10 items = d.getVar(var).split()
11 return repr(items[0] if len(items) == 1 else items)
12
13# Map our ARCH values to what Meson expects:
14# http://mesonbuild.com/Reference-tables.html#cpu-families
15def meson_cpu_family(var, d):
16 import re
17 arch = d.getVar(var)
18 if arch == 'powerpc':
19 return 'ppc'
20 elif arch == 'powerpc64' or arch == 'powerpc64le':
21 return 'ppc64'
22 elif arch == 'armeb':
23 return 'arm'
24 elif arch == 'aarch64_be':
25 return 'aarch64'
26 elif arch == 'mipsel':
27 return 'mips'
28 elif arch == 'mips64el':
29 return 'mips64'
30 elif re.match(r"i[3-6]86", arch):
31 return "x86"
32 elif arch == "microblazeel":
33 return "microblaze"
34 else:
35 return arch
36
37# Map our OS values to what Meson expects:
38# https://mesonbuild.com/Reference-tables.html#operating-system-names
39def meson_operating_system(var, d):
40 os = d.getVar(var)
41 if "mingw" in os:
42 return "windows"
43 # avoid e.g. 'linux-gnueabi'
44 elif "linux" in os:
45 return "linux"
46 else:
47 return os
48
49def meson_endian(prefix, d):
50 arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
51 sitedata = siteinfo_data_for_machine(arch, os, d)
52 if "endian-little" in sitedata:
53 return "little"
54 elif "endian-big" in sitedata:
55 return "big"
56 else:
57 bb.fatal("Cannot determine endianness for %s-%s" % (arch, os))
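
To illustrate meson_array with hypothetical variable values: a multi-word variable becomes the repr of a list and a single word collapses to a quoted string, which is the form Meson machine files expect:

    # CC = "x86_64-poky-linux-gcc -m64"
    #   meson_array('CC', d)  ->  "['x86_64-poky-linux-gcc', '-m64']"
    # AR = "x86_64-poky-linux-ar"
    #   meson_array('AR', d)  ->  "'x86_64-poky-linux-ar'"
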
diff --git a/meta/classes-recipe/meson.bbclass b/meta/classes-recipe/meson.bbclass
new file mode 100644
index 0000000000..765e81bc4f
--- /dev/null
+++ b/meta/classes-recipe/meson.bbclass
@@ -0,0 +1,179 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native meson-routines qemu
8
9DEPENDS:append = " meson-native ninja-native"
10
11EXEWRAPPER_ENABLED:class-native = "False"
12EXEWRAPPER_ENABLED:class-nativesdk = "False"
13EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}"
14DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}"
15
16# As Meson enforces out-of-tree builds we can just use cleandirs
17B = "${WORKDIR}/build"
18do_configure[cleandirs] = "${B}"
19
20# Where the meson.build build configuration is
21MESON_SOURCEPATH = "${S}"
22
23def noprefix(var, d):
24 return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
25
26MESON_BUILDTYPE ?= "${@oe.utils.vartrue('DEBUG_BUILD', 'debug', 'plain', d)}"
27MESON_BUILDTYPE[vardeps] += "DEBUG_BUILD"
28MESONOPTS = " --prefix ${prefix} \
29 --buildtype ${MESON_BUILDTYPE} \
30 --bindir ${@noprefix('bindir', d)} \
31 --sbindir ${@noprefix('sbindir', d)} \
32 --datadir ${@noprefix('datadir', d)} \
33 --libdir ${@noprefix('libdir', d)} \
34 --libexecdir ${@noprefix('libexecdir', d)} \
35 --includedir ${@noprefix('includedir', d)} \
36 --mandir ${@noprefix('mandir', d)} \
37 --infodir ${@noprefix('infodir', d)} \
38 --sysconfdir ${sysconfdir} \
39 --localstatedir ${localstatedir} \
40 --sharedstatedir ${sharedstatedir} \
41 --wrap-mode nodownload \
42 --native-file ${WORKDIR}/meson.native"
43
44EXTRA_OEMESON:append = " ${PACKAGECONFIG_CONFARGS}"
45
46MESON_CROSS_FILE = ""
47MESON_CROSS_FILE:class-target = "--cross-file ${WORKDIR}/meson.cross"
48MESON_CROSS_FILE:class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
49
50# Needed to set up qemu wrapper below
51export STAGING_DIR_HOST
52
53def rust_tool(d, target_var):
54 rustc = d.getVar('RUSTC')
55 if not rustc:
56 return ""
57 cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split()
58 return "rust = %s" % repr(cmd)
59
60addtask write_config before do_configure
61do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS"
62do_write_config() {
63 # These values need to be generated in Python (meson_array) so that multi-word variables become lists of individual arguments
64 cat >${WORKDIR}/meson.cross <<EOF
65[binaries]
66c = ${@meson_array('CC', d)}
67cpp = ${@meson_array('CXX', d)}
68cython = 'cython3'
69ar = ${@meson_array('AR', d)}
70nm = ${@meson_array('NM', d)}
71strip = ${@meson_array('STRIP', d)}
72readelf = ${@meson_array('READELF', d)}
73objcopy = ${@meson_array('OBJCOPY', d)}
74pkgconfig = 'pkg-config'
75llvm-config = 'llvm-config${LLVMVERSION}'
76cups-config = 'cups-config'
77g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
78g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
79${@rust_tool(d, "HOST_SYS")}
80${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""}
81
82[built-in options]
83c_args = ${@meson_array('CFLAGS', d)}
84c_link_args = ${@meson_array('LDFLAGS', d)}
85cpp_args = ${@meson_array('CXXFLAGS', d)}
86cpp_link_args = ${@meson_array('LDFLAGS', d)}
87
88[properties]
89needs_exe_wrapper = true
90
91[host_machine]
92system = '${@meson_operating_system('HOST_OS', d)}'
93cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
94cpu = '${HOST_ARCH}'
95endian = '${@meson_endian('HOST', d)}'
96
97[target_machine]
98system = '${@meson_operating_system('TARGET_OS', d)}'
99cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
100cpu = '${TARGET_ARCH}'
101endian = '${@meson_endian('TARGET', d)}'
102EOF
103
104 cat >${WORKDIR}/meson.native <<EOF
105[binaries]
106c = ${@meson_array('BUILD_CC', d)}
107cpp = ${@meson_array('BUILD_CXX', d)}
108cython = 'cython3'
109ar = ${@meson_array('BUILD_AR', d)}
110nm = ${@meson_array('BUILD_NM', d)}
111strip = ${@meson_array('BUILD_STRIP', d)}
112readelf = ${@meson_array('BUILD_READELF', d)}
113objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
114pkgconfig = 'pkg-config-native'
115${@rust_tool(d, "BUILD_SYS")}
116
117[built-in options]
118c_args = ${@meson_array('BUILD_CFLAGS', d)}
119c_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
120cpp_args = ${@meson_array('BUILD_CXXFLAGS', d)}
121cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
122EOF
123}
124
125do_write_config:append:class-target() {
126 # Write out a qemu wrapper that will be used as exe_wrapper so that meson
127 # can run target helper binaries through that.
128 qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
129 cat > ${WORKDIR}/meson-qemuwrapper << EOF
130#!/bin/sh
131# Use a modules directory which doesn't exist so we don't load random things
132# which may then get deleted (or their dependencies) and potentially segfault
133export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
134
135# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
136unset LD_LIBRARY_PATH
137
138$qemu_binary "\$@"
139EOF
140 chmod +x ${WORKDIR}/meson-qemuwrapper
141}
142
143# Tell externalsrc that changes to this file require a reconfigure
144CONFIGURE_FILES = "meson.build"
145
146meson_do_configure() {
147 # Meson requires this to be 'bfd', 'lld' or 'gold' from 0.53 onwards
148 # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
149 unset LD
150
151 # Work around "Meson fails if /tmp is mounted with noexec #2972"
152 mkdir -p "${B}/meson-private/tmp"
153 export TMPDIR="${B}/meson-private/tmp"
154 bbnote Executing meson ${EXTRA_OEMESON}...
155 if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
156 bbfatal_log meson failed
157 fi
158}
159
160python meson_do_qa_configure() {
161 import re
162 warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
163 with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile:
164 log = logfile.read()
165 for (prop, value) in warn_re.findall(log):
166 bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value))
167}
168do_configure[postfuncs] += "meson_do_qa_configure"
169
170do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
171meson_do_compile() {
172 ninja -v ${PARALLEL_MAKE}
173}
174
175meson_do_install() {
176 DESTDIR='${D}' ninja -v ${PARALLEL_MAKEINST} install
177}
178
179EXPORT_FUNCTIONS do_configure do_compile do_install
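
A minimal consumer of this class only needs to inherit it and pass project options through EXTRA_OEMESON; the option names below are assumptions for illustration, and PACKAGECONFIG_CONFARGS is appended automatically as shown above:

    inherit meson
    EXTRA_OEMESON = "-Dtests=false -Ddocs=disabled"
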
diff --git a/meta/classes-recipe/mime-xdg.bbclass b/meta/classes-recipe/mime-xdg.bbclass
new file mode 100644
index 0000000000..cbdcb4c7e9
--- /dev/null
+++ b/meta/classes-recipe/mime-xdg.bbclass
@@ -0,0 +1,78 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6# This class creates mime <-> application associations based on entry
7# 'MimeType' in *.desktop files
8#
9
10DEPENDS += "desktop-file-utils"
11PACKAGE_WRITE_DEPS += "desktop-file-utils-native"
12DESKTOPDIR = "${datadir}/applications"
13
14# There are recipes out there installing their .desktop files as absolute
15# symlinks. For us these are dangling and cannot easily be introspected for
16# "MimeType". By adding package names to MIME_XDG_PACKAGES, the packager can
17# force proper update-desktop-database handling. Note that all introspection
18# is skipped when MIME_XDG_PACKAGES is not empty.
19MIME_XDG_PACKAGES ?= ""
20
21mime_xdg_postinst() {
22if [ "x$D" != "x" ]; then
23 $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
24 mlprefix=${MLPREFIX} \
25 desktop_dir=${DESKTOPDIR}
26else
27 update-desktop-database $D${DESKTOPDIR}
28fi
29}
30
31mime_xdg_postrm() {
32if [ "x$D" != "x" ]; then
33 $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
34 mlprefix=${MLPREFIX} \
35 desktop_dir=${DESKTOPDIR}
36else
37 update-desktop-database $D${DESKTOPDIR}
38fi
39}
40
41python populate_packages:append () {
42 packages = d.getVar('PACKAGES').split()
43 pkgdest = d.getVar('PKGDEST')
44 desktop_base = d.getVar('DESKTOPDIR')
45 forced_mime_xdg_pkgs = (d.getVar('MIME_XDG_PACKAGES') or '').split()
46
47 for pkg in packages:
48 desktops_with_mime_found = pkg in forced_mime_xdg_pkgs
49 if d.getVar('MIME_XDG_PACKAGES') == '':
50 desktop_dir = '%s/%s%s' % (pkgdest, pkg, desktop_base)
51 if os.path.exists(desktop_dir):
52 for df in os.listdir(desktop_dir):
53 if df.endswith('.desktop'):
54 try:
55 with open(desktop_dir + '/'+ df, 'r') as f:
56 for line in f.read().split('\n'):
57 if 'MimeType' in line:
58 desktops_with_mime_found = True
59 break
60 except Exception:
61 bb.warn('Could not open %s. Set MIME_XDG_PACKAGES in recipe or add mime-xdg to INSANE_SKIP.' % (desktop_dir + '/' + df))
62 if desktops_with_mime_found:
63 break
64 if desktops_with_mime_found:
65 bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
66 postinst = d.getVar('pkg_postinst:%s' % pkg)
67 if not postinst:
68 postinst = '#!/bin/sh\n'
69 postinst += d.getVar('mime_xdg_postinst')
70 d.setVar('pkg_postinst:%s' % pkg, postinst)
71 postrm = d.getVar('pkg_postrm:%s' % pkg)
72 if not postrm:
73 postrm = '#!/bin/sh\n'
74 postrm += d.getVar('mime_xdg_postrm')
75 d.setVar('pkg_postrm:%s' % pkg, postrm)
76 bb.note("adding desktop-file-utils dependency to %s" % pkg)
77 d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
78}
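
For reference, the introspection above looks for a MimeType entry such as the one in this illustrative ${datadir}/applications/example.desktop; any package shipping such a file picks up the postinst/postrm scripts and the desktop-file-utils runtime dependency:

    [Desktop Entry]
    Type=Application
    Name=Example Viewer
    Exec=example %f
    MimeType=image/png;image/jpeg;
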
diff --git a/meta/classes-recipe/mime.bbclass b/meta/classes-recipe/mime.bbclass
new file mode 100644
index 0000000000..9b13f62bda
--- /dev/null
+++ b/meta/classes-recipe/mime.bbclass
@@ -0,0 +1,76 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class is used by recipes installing mime types
9#
10
11DEPENDS += "${@bb.utils.contains('BPN', 'shared-mime-info', '', 'shared-mime-info', d)}"
12PACKAGE_WRITE_DEPS += "shared-mime-info-native"
13MIMEDIR = "${datadir}/mime"
14
15mime_postinst() {
16if [ "x$D" != "x" ]; then
17 $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
18 mlprefix=${MLPREFIX} \
19 mimedir=${MIMEDIR}
20else
21 echo "Updating MIME database... this may take a while."
22 update-mime-database $D${MIMEDIR}
23fi
24}
25
26mime_postrm() {
27if [ "x$D" != "x" ]; then
28 $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
29 mlprefix=${MLPREFIX} \
30 mimedir=${MIMEDIR}
31else
32 echo "Updating MIME database... this may take a while."
33 # $D${MIMEDIR}/packages belongs to the shared-mime-info-data package;
34 # packages like libfm-mime depend on shared-mime-info-data.
35 # After shared-mime-info-data is uninstalled, $D${MIMEDIR}/packages
36 # is removed, but update-mime-database needs this dir to update the
37 # database; as a workaround, create one and remove it later.
38 if [ ! -d $D${MIMEDIR}/packages ]; then
39 mkdir -p $D${MIMEDIR}/packages
40 update-mime-database $D${MIMEDIR}
41 rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages
42 else
43 update-mime-database $D${MIMEDIR}
44fi
45fi
46}
47
48python populate_packages:append () {
49 packages = d.getVar('PACKAGES').split()
50 pkgdest = d.getVar('PKGDEST')
51 mimedir = d.getVar('MIMEDIR')
52
53 for pkg in packages:
54 mime_packages_dir = '%s/%s%s/packages' % (pkgdest, pkg, mimedir)
55 mimes_types_found = False
56 if os.path.exists(mime_packages_dir):
57 for f in os.listdir(mime_packages_dir):
58 if f.endswith('.xml'):
59 mimes_types_found = True
60 break
61 if mimes_types_found:
62 bb.note("adding mime postinst and postrm scripts to %s" % pkg)
63 postinst = d.getVar('pkg_postinst:%s' % pkg)
64 if not postinst:
65 postinst = '#!/bin/sh\n'
66 postinst += d.getVar('mime_postinst')
67 d.setVar('pkg_postinst:%s' % pkg, postinst)
68 postrm = d.getVar('pkg_postrm:%s' % pkg)
69 if not postrm:
70 postrm = '#!/bin/sh\n'
71 postrm += d.getVar('mime_postrm')
72 d.setVar('pkg_postrm:%s' % pkg, postrm)
73 if pkg != 'shared-mime-info-data':
74 bb.note("adding shared-mime-info-data dependency to %s" % pkg)
75 d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
76}
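
The packages directory scanned above holds shared-mime-info XML definitions; an illustrative ${MIMEDIR}/packages/example.xml that would trigger the script injection:

    <?xml version="1.0" encoding="UTF-8"?>
    <mime-info xmlns="http://www.freedesktop.org/standards/shared-mime-info">
      <mime-type type="application/x-example">
        <glob pattern="*.example"/>
      </mime-type>
    </mime-info>
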
diff --git a/meta/classes-recipe/module-base.bbclass b/meta/classes-recipe/module-base.bbclass
new file mode 100644
index 0000000000..094b563b1a
--- /dev/null
+++ b/meta/classes-recipe/module-base.bbclass
@@ -0,0 +1,27 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-arch
8
9# We do the dependency this way because the output is not preserved
10# in sstate, so we must force do_compile to run (once).
11do_configure[depends] += "make-mod-scripts:do_compile"
12
13export OS = "${TARGET_OS}"
14export CROSS_COMPILE = "${TARGET_PREFIX}"
15
16# This points to the build artefacts from the main kernel build
17# such as .config and System.map
18# Confusingly it is not the module build output (which is ${B}) but
19# we didn't pick the name.
20export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
21
22export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
23KERNEL_OBJECT_SUFFIX = ".ko"
24
25# kernel modules are generally machine specific
26PACKAGE_ARCH = "${MACHINE_ARCH}"
27
diff --git a/meta/classes-recipe/module.bbclass b/meta/classes-recipe/module.bbclass
new file mode 100644
index 0000000000..d52d5e3098
--- /dev/null
+++ b/meta/classes-recipe/module.bbclass
@@ -0,0 +1,80 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit module-base kernel-module-split pkgconfig
8
9EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
10
11MODULES_INSTALL_TARGET ?= "modules_install"
12MODULES_MODULE_SYMVERS_LOCATION ?= ""
13
14python __anonymous () {
15 depends = d.getVar('DEPENDS')
16 extra_symbols = []
17 for dep in depends.split():
18 if dep.startswith("kernel-module-"):
19 extra_symbols.append("${STAGING_INCDIR}/" + dep + "/Module.symvers")
20 d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
21}
22
23python do_devshell:prepend () {
24 os.environ['CFLAGS'] = ''
25 os.environ['CPPFLAGS'] = ''
26 os.environ['CXXFLAGS'] = ''
27 os.environ['LDFLAGS'] = ''
28
29 os.environ['KERNEL_PATH'] = d.getVar('STAGING_KERNEL_DIR')
30 os.environ['KERNEL_SRC'] = d.getVar('STAGING_KERNEL_DIR')
31 os.environ['KERNEL_VERSION'] = d.getVar('KERNEL_VERSION')
32 os.environ['CC'] = d.getVar('KERNEL_CC')
33 os.environ['LD'] = d.getVar('KERNEL_LD')
34 os.environ['AR'] = d.getVar('KERNEL_AR')
35 os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR')
36 kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS')
37 if kbuild_extra_symbols:
38 os.environ['KBUILD_EXTRA_SYMBOLS'] = kbuild_extra_symbols
39 else:
40 os.environ['KBUILD_EXTRA_SYMBOLS'] = ''
41}
42
43module_do_compile() {
44 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
45 oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
46 KERNEL_VERSION=${KERNEL_VERSION} \
47 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
48 AR="${KERNEL_AR}" \
49 O=${STAGING_KERNEL_BUILDDIR} \
50 KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \
51 ${MAKE_TARGETS}
52}
53
54module_do_install() {
55 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
56 oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
57 INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \
58 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
59 O=${STAGING_KERNEL_BUILDDIR} \
60 ${MODULES_INSTALL_TARGET}
61
62 if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then
63 bbwarn "Module.symvers not found in ${B}/${MODULES_MODULE_SYMVERS_LOCATION}"
64 bbwarn "Please consider setting MODULES_MODULE_SYMVERS_LOCATION to a"
65 bbwarn "directory below B to get correct inter-module dependencies"
66 else
67 install -Dm0644 "${B}/${MODULES_MODULE_SYMVERS_LOCATION}"/Module.symvers ${D}${includedir}/${BPN}/Module.symvers
68 # Module.symvers contains absolute path to the build directory.
69 # While it doesn't actually seem to matter which path is specified,
70 # clear them out to avoid confusion
71 sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
72 fi
73}
74
75EXPORT_FUNCTIONS do_compile do_install
76
77# add all split modules to the PN RDEPENDS; PN can be empty now
78KERNEL_MODULES_META_PACKAGE = "${PN}"
79FILES:${PN} = ""
80ALLOW_EMPTY:${PN} = "1"
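
A sketch of an out-of-tree module recipe built on this class; the fetch URL is a placeholder and the module's Makefile is assumed to follow the kernel's Kbuild conventions so that the oe_runmake invocations above work unchanged:

    SUMMARY = "Example out-of-tree kernel module"
    LICENSE = "GPL-2.0-only"

    inherit module

    SRC_URI = "git://example.com/hello-mod.git;protocol=https;branch=main"
    S = "${WORKDIR}/git"

    # MAKE_TARGETS / MODULES_INSTALL_TARGET keep their defaults;
    # KERNEL_SRC is already passed via EXTRA_OEMAKE by this class.
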
diff --git a/meta/classes-recipe/multilib_header.bbclass b/meta/classes-recipe/multilib_header.bbclass
new file mode 100644
index 0000000000..33f7e027f0
--- /dev/null
+++ b/meta/classes-recipe/multilib_header.bbclass
@@ -0,0 +1,58 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit siteinfo
8
9# If applicable on the architecture, this routine will rename the header and
10# add a unique identifier to the name for the ABI/bitsize that is being used.
11# A wrapper will be generated for the architecture that knows how to call
12# all of the ABI variants for that given architecture.
13#
14oe_multilib_header() {
15
16 case ${HOST_OS} in
17 *-musl*)
18 return
19 ;;
20 *)
21 esac
22 # For MIPS: "n32" is a special case, which needs to be
23 # distinct from both 64-bit and 32-bit.
24 case ${TARGET_ARCH} in
25 mips*) case "${MIPSPKGSFX_ABI}" in
26 "-n32")
27 ident=n32
28 ;;
29 *)
30 ident=${SITEINFO_BITS}
31 ;;
32 esac
33 ;;
34 *) ident=${SITEINFO_BITS}
35 esac
36 for each_header in "$@" ; do
37 if [ ! -f "${D}/${includedir}/$each_header" ]; then
38 bberror "oe_multilib_header: Unable to find header $each_header."
39 continue
40 fi
41 stem=$(echo $each_header | sed 's#\.h$##')
42 # if mips64/n32 set ident to n32
43 mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
44
45 sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
46 done
47}
48
49# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
50# We don't need multilib headers for native builds so brute force things.
51oe_multilib_header:class-native () {
52 return
53}
54
55# Nor do we need multilib headers for nativesdk builds.
56oe_multilib_header:class-nativesdk () {
57 return
58}
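
Typical usage is a one-line call from do_install for each header whose content differs per ABI/bitsize; the header path below is hypothetical:

    do_install:append () {
        oe_multilib_header example/config.h
    }
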
diff --git a/meta/classes-recipe/multilib_script.bbclass b/meta/classes-recipe/multilib_script.bbclass
new file mode 100644
index 0000000000..7011526254
--- /dev/null
+++ b/meta/classes-recipe/multilib_script.bbclass
@@ -0,0 +1,40 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Recipe needs to set MULTILIB_SCRIPTS in the form <pkgname>:<scriptname>, e.g.
9# MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/file1 ${PN}:${base_bindir}/file2"
10# to indicate which script files to process from which packages.
11#
12
13inherit update-alternatives
14
15 MULTILIB_SUFFIX = "${@d.getVar('base_libdir').split('/')[-1]}"
16
17PACKAGE_PREPROCESS_FUNCS += "multilibscript_rename"
18
19multilibscript_rename() {
20 :
21}
22
23python () {
24 # Do nothing if multilib isn't being used
25 if not d.getVar("MULTILIB_VARIANTS"):
26 return
27 # Do nothing for native/cross
28 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
29 return
30
31 for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
32 pkg, script = entry.split(":")
33 epkg = d.expand(pkg)
34 scriptname = os.path.basename(script)
35 d.appendVar("ALTERNATIVE:" + epkg, " " + scriptname + " ")
36 d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
37 d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
38 d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
39 d.appendVar("FILES:" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
40}
diff --git a/meta/classes-recipe/native.bbclass b/meta/classes-recipe/native.bbclass
new file mode 100644
index 0000000000..61ad053def
--- /dev/null
+++ b/meta/classes-recipe/native.bbclass
@@ -0,0 +1,236 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# We want native packages to be relocatable
8inherit relocatable
9
10# Native packages are built indirectly via dependency,
11# no need for them to be a direct target of 'world'
12EXCLUDE_FROM_WORLD = "1"
13
14PACKAGE_ARCH = "${BUILD_ARCH}"
15
16# used by cmake class
17OECMAKE_RPATH = "${libdir}"
18OECMAKE_RPATH:class-native = "${libdir}"
19
20TARGET_ARCH = "${BUILD_ARCH}"
21TARGET_OS = "${BUILD_OS}"
22TARGET_VENDOR = "${BUILD_VENDOR}"
23TARGET_PREFIX = "${BUILD_PREFIX}"
24TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
25TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
26TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
27TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}"
28TARGET_CFLAGS = "${BUILD_CFLAGS}"
29TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
30TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
31TARGET_FPU = ""
32TUNE_FEATURES = ""
33ABIEXTENSION = ""
34
35HOST_ARCH = "${BUILD_ARCH}"
36HOST_OS = "${BUILD_OS}"
37HOST_VENDOR = "${BUILD_VENDOR}"
38HOST_PREFIX = "${BUILD_PREFIX}"
39HOST_CC_ARCH = "${BUILD_CC_ARCH}"
40HOST_LD_ARCH = "${BUILD_LD_ARCH}"
41HOST_AS_ARCH = "${BUILD_AS_ARCH}"
42
43CPPFLAGS = "${BUILD_CPPFLAGS}"
44CFLAGS = "${BUILD_CFLAGS}"
45CXXFLAGS = "${BUILD_CXXFLAGS}"
46LDFLAGS = "${BUILD_LDFLAGS}"
47
48STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
49STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
50
51# native pkg doesn't need the TOOLCHAIN_OPTIONS.
52TOOLCHAIN_OPTIONS = ""
53
54# Don't build ptest natively
55PTEST_ENABLED = "0"
56
57# Don't use site files for native builds
58export CONFIG_SITE = "${COREBASE}/meta/site/native"
59
60# set the compiler as well. It could have been set to something else
61export CC = "${BUILD_CC}"
62export CXX = "${BUILD_CXX}"
63export FC = "${BUILD_FC}"
64export CPP = "${BUILD_CPP}"
65export LD = "${BUILD_LD}"
66export CCLD = "${BUILD_CCLD}"
67export AR = "${BUILD_AR}"
68export AS = "${BUILD_AS}"
69export RANLIB = "${BUILD_RANLIB}"
70export STRIP = "${BUILD_STRIP}"
71export NM = "${BUILD_NM}"
72
73# Path prefixes
74base_prefix = "${STAGING_DIR_NATIVE}"
75prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
76exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
77
78bindir = "${STAGING_BINDIR_NATIVE}"
79sbindir = "${STAGING_SBINDIR_NATIVE}"
80base_libdir = "${STAGING_LIBDIR_NATIVE}"
81libdir = "${STAGING_LIBDIR_NATIVE}"
82includedir = "${STAGING_INCDIR_NATIVE}"
83sysconfdir = "${STAGING_ETCDIR_NATIVE}"
84datadir = "${STAGING_DATADIR_NATIVE}"
85
86baselib = "lib"
87
88export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
89
90NATIVE_PACKAGE_PATH_SUFFIX ?= ""
91bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
92sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
93base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
94libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
95libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
96
97do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
98do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
99
100# Since we actually install these in situ there is no staging prefix
101STAGING_DIR_HOST = ""
102STAGING_DIR_TARGET = ""
103PKG_CONFIG_DIR = "${libdir}/pkgconfig"
104
105EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
106PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
107PKG_CONFIG_SYSROOT_DIR = ""
108PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
109PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
110
111# we don't want libc-*libc to kick in for native recipes
112LIBCOVERRIDE = ""
113CLASSOVERRIDE = "class-native"
114MACHINEOVERRIDES = ""
115MACHINE_FEATURES = ""
116
117PATH:prepend = "${COREBASE}/scripts/native-intercept:"
118
119# This class encodes staging paths into its scripts data so it can only be
120# reused if we manipulate the paths.
121SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
122
123# Don't strip the sysroot when DEBUG_BUILD is enabled
124INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
125
126python native_virtclass_handler () {
127 pn = e.data.getVar("PN")
128 if not pn.endswith("-native"):
129 return
130 bpn = e.data.getVar("BPN")
131
132 # Set features here to prevent appends and distro features backfill
133 # from modifying native distro features
134 features = set(d.getVar("DISTRO_FEATURES_NATIVE").split())
135 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVE"), d).split())
136 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
137
138 classextend = e.data.getVar('BBCLASSEXTEND') or ""
139 if "native" not in classextend:
140 return
141
142 def map_dependencies(varname, d, suffix = "", selfref=True):
143 if suffix:
144 varname = varname + ":" + suffix
145 deps = d.getVar(varname)
146 if not deps:
147 return
148 deps = bb.utils.explode_deps(deps)
149 newdeps = []
150 for dep in deps:
151 if dep == pn:
152 if not selfref:
153 continue
154 newdeps.append(dep)
155 elif "-cross-" in dep:
156 newdeps.append(dep.replace("-cross", "-native"))
157 elif not dep.endswith("-native"):
158 # Replace ${PN} with ${BPN} in the dependency to make sure
159 # dependencies on, e.g., ${PN}-foo become ${BPN}-foo-native
160 # rather than ${BPN}-native-foo-native.
161 newdeps.append(dep.replace(pn, bpn) + "-native")
162 else:
163 newdeps.append(dep)
164 d.setVar(varname, " ".join(newdeps), parsing=True)
165
166 map_dependencies("DEPENDS", e.data, selfref=False)
167 for pkg in e.data.getVar("PACKAGES", False).split():
168 map_dependencies("RDEPENDS", e.data, pkg)
169 map_dependencies("RRECOMMENDS", e.data, pkg)
170 map_dependencies("RSUGGESTS", e.data, pkg)
171 map_dependencies("RPROVIDES", e.data, pkg)
172 map_dependencies("RREPLACES", e.data, pkg)
173 map_dependencies("PACKAGES", e.data)
174
175 provides = e.data.getVar("PROVIDES")
176 nprovides = []
177 for prov in provides.split():
178 if prov.find(pn) != -1:
179 nprovides.append(prov)
180 elif not prov.endswith("-native"):
181 nprovides.append(prov + "-native")
182 else:
183 nprovides.append(prov)
184 e.data.setVar("PROVIDES", ' '.join(nprovides))
185
186
187}
188
189addhandler native_virtclass_handler
190native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
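
# Illustrative sketch (not part of the class): for a hypothetical recipe
# "foo" extended to "foo-native" (pn = "foo-native", bpn = "foo"), the
# mapping in map_dependencies() above renames dependencies like so:
#
#   "zlib"            -> "zlib-native"      (plain deps gain the suffix)
#   "gcc-cross-arm"   -> "gcc-native-arm"   ("-cross" swapped for "-native")
#   "foo-native-doc"  -> "foo-doc-native"   (PN replaced by BPN first, so
#                                            the suffix lands at the end)
#   "zlib-native"     -> "zlib-native"      (already -native, left alone)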
191
192python do_addto_recipe_sysroot () {
193 bb.build.exec_func("extend_recipe_sysroot", d)
194}
195addtask addto_recipe_sysroot after do_populate_sysroot
196do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
197
198inherit nopackages
199
200do_packagedata[stamp-extra-info] = ""
201
202USE_NLS = "no"
203
204RECIPERDEPTASK = "do_populate_sysroot"
205do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
206
207#
208# Native task outputs are directly run on the target (host) system after being
209# built. Even if the output of this recipe doesn't change, a change in one of
210# its dependencies may cause a change in the output it generates (e.g. rpm
211# output depends on the output of its dependent zstd library).
212#
213# This can cause poor interactions with hash equivalence, since this recipe's
214# output-changing dependency is "hidden" and downstream tasks only see that this
215# recipe has the same outhash and is therefore equivalent. This can result in
216# different output in different cases.
217#
218# To resolve this, unhide the output-changing dependency by adding its unihash
219# to this task's outhash calculation. Unfortunately, we don't specifically
220# know which dependencies are output-changing, so we have to add all of them.
221#
222python native_add_do_populate_sysroot_deps () {
223 current_task = "do_" + d.getVar("BB_CURRENTTASK")
224 if current_task != "do_populate_sysroot":
225 return
226
227 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
228 pn = d.getVar("PN")
229 deps = {
230 dep[0]:dep[6] for dep in taskdepdata.values() if
231 dep[1] == current_task and dep[0] != pn
232 }
233
234 d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
235}
236SSTATECREATEFUNCS += "native_add_do_populate_sysroot_deps"
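
# Illustrative note (based on how BB_TASKDEPDATA is indexed above: entry [0]
# is a dependency's PN, [1] its task name and [6] its unihash): for a recipe
# with two do_populate_sysroot dependencies, HASHEQUIV_EXTRA_SIGDATA would
# hold something like
#
#   openssl-native: <unihash of openssl-native:do_populate_sysroot>
#   zlib-native: <unihash of zlib-native:do_populate_sysroot>
#
# i.e. one "PN: unihash" line per dependency, sorted by PN so the outhash
# input is deterministic.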
diff --git a/meta/classes-recipe/nativesdk.bbclass b/meta/classes-recipe/nativesdk.bbclass
new file mode 100644
index 0000000000..08288fdb73
--- /dev/null
+++ b/meta/classes-recipe/nativesdk.bbclass
@@ -0,0 +1,124 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# SDK packages are built either explicitly by the user,
8# or indirectly via dependency. No need to be in 'world'.
9EXCLUDE_FROM_WORLD = "1"
10
11STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
12
13# libc for the SDK can be different to that of the target
14NATIVESDKLIBC ?= "libc-glibc"
15LIBCOVERRIDE = ":${NATIVESDKLIBC}"
16CLASSOVERRIDE = "class-nativesdk"
17MACHINEOVERRIDES = ""
18MACHINE_FEATURES = ""
19
20MULTILIBS = ""
21
22# We need a consistent staging dir whether or not multilib is enabled
23STAGING_DIR_HOST = "${WORKDIR}/recipe-sysroot"
24STAGING_DIR_TARGET = "${WORKDIR}/recipe-sysroot"
25RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot"
26
27#
28# Update PACKAGE_ARCH and PACKAGE_ARCHS
29#
30PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
31PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
32
33#
34# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
35# binaries
36#
37DEPENDS:append = " chrpath-replacement-native"
38EXTRANATIVEPATH += "chrpath-native"
39
40PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
41
42HOST_ARCH = "${SDK_ARCH}"
43HOST_VENDOR = "${SDK_VENDOR}"
44HOST_OS = "${SDK_OS}"
45HOST_PREFIX = "${SDK_PREFIX}"
46HOST_CC_ARCH = "${SDK_CC_ARCH}"
47HOST_LD_ARCH = "${SDK_LD_ARCH}"
48HOST_AS_ARCH = "${SDK_AS_ARCH}"
49#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
50
51TARGET_ARCH = "${SDK_ARCH}"
52TARGET_VENDOR = "${SDK_VENDOR}"
53TARGET_OS = "${SDK_OS}"
54TARGET_PREFIX = "${SDK_PREFIX}"
55TARGET_CC_ARCH = "${SDK_CC_ARCH}"
56TARGET_LD_ARCH = "${SDK_LD_ARCH}"
57TARGET_AS_ARCH = "${SDK_AS_ARCH}"
58TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
59TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
60TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
61TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
62TARGET_FPU = ""
63EXTRA_OECONF_GCC_FLOAT = ""
64TUNE_FEATURES = ""
65
66CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
67CFLAGS = "${BUILDSDK_CFLAGS}"
68CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
69LDFLAGS = "${BUILDSDK_LDFLAGS}"
70
71# Change to place files in SDKPATH
72base_prefix = "${SDKPATHNATIVE}"
73prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
74exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
75baselib = "lib"
76sbindir = "${bindir}"
77
78export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
79export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
80
81python nativesdk_virtclass_handler () {
82 pn = e.data.getVar("PN")
83 if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
84 return
85
86 # Set features here to prevent appends and distro features backfill
87 # from modifying nativesdk distro features
88 features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
89 filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
90 d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
91
92 e.data.setVar("MLPREFIX", "nativesdk-")
93 e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", ""))
94}
95
96python () {
97 pn = d.getVar("PN")
98 if not pn.startswith("nativesdk-"):
99 return
100
101 import oe.classextend
102
103 clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
104 clsextend.rename_packages()
105 clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
106
107 clsextend.map_depends_variable("DEPENDS")
108 clsextend.map_packagevars()
109 clsextend.map_variable("PROVIDES")
110 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
111 d.setVar("LIBCEXTENSION", "")
112 d.setVar("ABIEXTENSION", "")
113}
114
115addhandler nativesdk_virtclass_handler
116nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
117
118do_packagedata[stamp-extra-info] = ""
119
120USE_NLS = "${SDKUSE_NLS}"
121
122OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
123
124PATH:prepend = "${COREBASE}/scripts/nativesdk-intercept:"
diff --git a/meta/classes-recipe/nopackages.bbclass b/meta/classes-recipe/nopackages.bbclass
new file mode 100644
index 0000000000..9ea7273530
--- /dev/null
+++ b/meta/classes-recipe/nopackages.bbclass
@@ -0,0 +1,19 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7deltask do_package
8deltask do_package_write_rpm
9deltask do_package_write_ipk
10deltask do_package_write_deb
11deltask do_package_write_tar
12deltask do_package_qa
13deltask do_packagedata
14deltask do_package_setscene
15deltask do_package_write_rpm_setscene
16deltask do_package_write_ipk_setscene
17deltask do_package_write_deb_setscene
18deltask do_package_qa_setscene
19deltask do_packagedata_setscene
diff --git a/meta/classes-recipe/npm.bbclass b/meta/classes-recipe/npm.bbclass
new file mode 100644
index 0000000000..deea53c9ec
--- /dev/null
+++ b/meta/classes-recipe/npm.bbclass
@@ -0,0 +1,340 @@
1# Copyright (C) 2020 Savoir-Faire Linux
2#
3# SPDX-License-Identifier: GPL-2.0-only
4#
5# This bbclass builds and installs an npm package to the target. The package
6# source files should be fetched in the calling recipe by using the SRC_URI
7# variable. The ${S} variable should be updated depending on your fetcher.
8#
9# Usage:
10# SRC_URI = "..."
11# inherit npm
12#
13# Optional variables:
14# NPM_ARCH:
15# Override the auto generated npm architecture.
16#
17# NPM_INSTALL_DEV:
18# Set to 1 to also install devDependencies.
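#
# Example (hypothetical recipe, shown for illustration only):
#
#   SRC_URI = "npm://registry.npmjs.org/;package=cute-files;version=1.0.2 \
#              npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json"
#   S = "${WORKDIR}/npm"
#   inherit npm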
19
20inherit python3native
21
22DEPENDS:prepend = "nodejs-native nodejs-oe-cache-native "
23RDEPENDS:${PN}:append:class-target = " nodejs"
24
25EXTRA_OENPM = ""
26
27NPM_INSTALL_DEV ?= "0"
28
29NPM_NODEDIR ?= "${RECIPE_SYSROOT_NATIVE}${prefix_native}"
30
31def npm_target_arch_map(target_arch):
32 """Maps arch names to npm arch names"""
33 import re
34 if re.match("p(pc|owerpc)(|64)", target_arch):
35 return "ppc"
36 elif re.match("i.86$", target_arch):
37 return "ia32"
38 elif re.match("x86_64$", target_arch):
39 return "x64"
40 elif re.match("arm64$", target_arch):
41 return "arm"
42 return target_arch
43
44NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
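
# For illustration, the mapping above yields e.g.:
#   npm_target_arch_map("powerpc64") -> "ppc"
#   npm_target_arch_map("i686")      -> "ia32"
#   npm_target_arch_map("x86_64")    -> "x64"
#   npm_target_arch_map("aarch64")   -> "aarch64"  (passed through unchanged)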
45
46NPM_PACKAGE = "${WORKDIR}/npm-package"
47NPM_CACHE = "${WORKDIR}/npm-cache"
48NPM_BUILD = "${WORKDIR}/npm-build"
49NPM_REGISTRY = "${WORKDIR}/npm-registry"
50
51def npm_global_configs(d):
52 """Get the npm global configuration"""
53 configs = []
54 # Ensure no network access is done
55 configs.append(("offline", "true"))
56 configs.append(("proxy", "http://invalid"))
57 configs.append(("funds", False))
58 configs.append(("audit", False))
59 # Configure the cache directory
60 configs.append(("cache", d.getVar("NPM_CACHE")))
61 return configs
62
63## 'npm pack' runs the 'prepare' and 'prepack' scripts. Support for
64## 'ignore-scripts', which prevents this behavior, has been removed
65## from nodejs 16. Use a simple 'tar' instead.
66def npm_pack(env, srcdir, workdir):
67 """Emulate 'npm pack' on a specified directory"""
68 import subprocess
69 import os
70 import json
71
72 src = os.path.join(srcdir, 'package.json')
73 with open(src) as f:
74 j = json.load(f)
75
76 # base does not really matter and is for documentation purposes
77 # only. But the 'version' part must exist because other parts of
78 # the bbclass rely on it.
79 base = j['name'].split('/')[-1]
80    tarball = os.path.join(workdir, "%s-%s.tgz" % (base, j['version']))
81
82 # TODO: real 'npm pack' does not include directories while 'tar'
83 # does. But this does not seem to matter...
84    subprocess.run(['tar', 'czf', tarball,
85                    '--exclude', './node_modules',
86                    '--exclude-vcs',
87                    '--transform', r's,^\./,package/,',
88                    '--mtime', '1985-10-26T08:15:00.000Z',
89                    '.'],
90                   check=True, cwd=srcdir)
91
92 return (tarball, j)
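
## Illustration (assumed input): for a package.json containing
##   {"name": "@scope/foo", "version": "1.2.3"}
## npm_pack() creates <workdir>/foo-1.2.3.tgz with every file placed under a
## top-level "package/" directory and a fixed mtime, mirroring the layout
## that 'npm pack' would produce.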
93
94python npm_do_configure() {
95 """
96 Step one: configure the npm cache and the main npm package
97
98    All dependencies have been fetched and patched in the source directory.
99    They have to be packed (this removes unneeded files) and added to the npm
100 cache to be available for the next step.
101
102 The main package and its associated manifest file and shrinkwrap file have
103 to be configured to take into account these cached dependencies.
104 """
105 import base64
106 import copy
107 import json
108 import re
109 import shlex
110 import stat
111 import tempfile
112 from bb.fetch2.npm import NpmEnvironment
113 from bb.fetch2.npm import npm_unpack
114 from bb.fetch2.npmsw import foreach_dependencies
115 from bb.progress import OutOfProgressHandler
116 from oe.npm_registry import NpmRegistry
117
118 bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
119 bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
120
121 env = NpmEnvironment(d, configs=npm_global_configs(d))
122 registry = NpmRegistry(d.getVar('NPM_REGISTRY'), d.getVar('NPM_CACHE'))
123
124 def _npm_cache_add(tarball, pkg):
125 """Add tarball to local registry and register it in the
126 cache"""
127 registry.add_pkg(tarball, pkg)
128
129 def _npm_integrity(tarball):
130 """Return the npm integrity of a specified tarball"""
131 sha512 = bb.utils.sha512_file(tarball)
132 return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
133
134 def _npmsw_dependency_dict(orig, deptree):
135 """
136 Return the sub dictionary in the 'orig' dictionary corresponding to the
137 'deptree' dependency tree. This function follows the shrinkwrap file
138 format.
139 """
140 ptr = orig
141 for dep in deptree:
142 if "dependencies" not in ptr:
143 ptr["dependencies"] = {}
144 ptr = ptr["dependencies"]
145 if dep not in ptr:
146 ptr[dep] = {}
147 ptr = ptr[dep]
148 return ptr
149
150 # Manage the manifest file and shrinkwrap files
151 orig_manifest_file = d.expand("${S}/package.json")
152 orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json")
153 cached_manifest_file = d.expand("${NPM_PACKAGE}/package.json")
154 cached_shrinkwrap_file = d.expand("${NPM_PACKAGE}/npm-shrinkwrap.json")
155
156 with open(orig_manifest_file, "r") as f:
157 orig_manifest = json.load(f)
158
159 cached_manifest = copy.deepcopy(orig_manifest)
160 cached_manifest.pop("dependencies", None)
161 cached_manifest.pop("devDependencies", None)
162
163 has_shrinkwrap_file = True
164
165 try:
166 with open(orig_shrinkwrap_file, "r") as f:
167 orig_shrinkwrap = json.load(f)
168 except IOError:
169 has_shrinkwrap_file = False
170
171 if has_shrinkwrap_file:
172 cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
173 cached_shrinkwrap.pop("dependencies", None)
174
175 # Manage the dependencies
176 progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
177 progress_total = 1 # also count the main package
178 progress_done = 0
179
180 def _count_dependency(name, params, deptree):
181 nonlocal progress_total
182 progress_total += 1
183
184 def _cache_dependency(name, params, deptree):
185 destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
186 destsuffix = os.path.join(*destsubdirs)
187 with tempfile.TemporaryDirectory() as tmpdir:
188 # Add the dependency to the npm cache
189 destdir = os.path.join(d.getVar("S"), destsuffix)
190 (tarball, pkg) = npm_pack(env, destdir, tmpdir)
191 _npm_cache_add(tarball, pkg)
192 # Add its signature to the cached shrinkwrap
193 dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
194 dep["version"] = pkg['version']
195 dep["integrity"] = _npm_integrity(tarball)
196 if params.get("dev", False):
197 dep["dev"] = True
198 # Display progress
199 nonlocal progress_done
200 progress_done += 1
201 progress.write("%d/%d" % (progress_done, progress_total))
202
203 dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
204
205 if has_shrinkwrap_file:
206 foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
207 foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
208
209 # Configure the main package
210 with tempfile.TemporaryDirectory() as tmpdir:
211 (tarball, _) = npm_pack(env, d.getVar("S"), tmpdir)
212 npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
213
214 # Configure the cached manifest file and cached shrinkwrap file
215 def _update_manifest(depkey):
216 for name in orig_manifest.get(depkey, {}):
217 version = cached_shrinkwrap["dependencies"][name]["version"]
218 if depkey not in cached_manifest:
219 cached_manifest[depkey] = {}
220 cached_manifest[depkey][name] = version
221
222 if has_shrinkwrap_file:
223 _update_manifest("dependencies")
224
225 if dev:
226 if has_shrinkwrap_file:
227 _update_manifest("devDependencies")
228
229 os.chmod(cached_manifest_file, os.stat(cached_manifest_file).st_mode | stat.S_IWUSR)
230 with open(cached_manifest_file, "w") as f:
231 json.dump(cached_manifest, f, indent=2)
232
233 if has_shrinkwrap_file:
234 with open(cached_shrinkwrap_file, "w") as f:
235 json.dump(cached_shrinkwrap, f, indent=2)
236}
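
# Standalone sketch (for illustration only) of the integrity format computed
# by _npm_integrity() above:
#
#   import base64, hashlib
#   def integrity_of(path):
#       with open(path, "rb") as f:
#           digest = hashlib.sha512(f.read()).digest()
#       return "sha512-" + base64.b64encode(digest).decode()
#
# which yields the same "sha512-<base64>" Subresource-Integrity style string
# that npm records for each dependency in npm-shrinkwrap.json.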
237
238python npm_do_compile() {
239 """
240 Step two: install the npm package
241
242 Use the configured main package and the cached dependencies to run the
243 installation process. The installation is done in a directory which is
244    not yet the destination directory.
245
246 A combination of 'npm pack' and 'npm install' is used to ensure that the
247 installed files are actual copies instead of symbolic links (which is the
248 default npm behavior).
249 """
250 import shlex
251 import tempfile
252 from bb.fetch2.npm import NpmEnvironment
253
254 bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
255
256 with tempfile.TemporaryDirectory() as tmpdir:
257 args = []
258 configs = npm_global_configs(d)
259
260 if bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False):
261 configs.append(("also", "development"))
262 else:
263 configs.append(("only", "production"))
264
265 # Report as many logs as possible for debugging purpose
266 configs.append(("loglevel", "silly"))
267
268 # Configure the installation to be done globally in the build directory
269 configs.append(("global", "true"))
270 configs.append(("prefix", d.getVar("NPM_BUILD")))
271
272 # Add node-gyp configuration
273 configs.append(("arch", d.getVar("NPM_ARCH")))
274 configs.append(("release", "true"))
275 configs.append(("nodedir", d.getVar("NPM_NODEDIR")))
276 configs.append(("python", d.getVar("PYTHON")))
277
278 env = NpmEnvironment(d, configs)
279
280 # Add node-pre-gyp configuration
281 args.append(("target_arch", d.getVar("NPM_ARCH")))
282 args.append(("build-from-source", "true"))
283
284 # Pack and install the main package
285 (tarball, _) = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
286 cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
287 env.run(cmd, args=args)
288}
289
290npm_do_install() {
291 # Step three: final install
292 #
293    # The previous installation has to be filtered to remove some extra files.
294
295 rm -rf ${D}
296
297 # Copy the entire lib and bin directories
298 install -d ${D}/${nonarch_libdir}
299 cp --no-preserve=ownership --recursive ${NPM_BUILD}/lib/. ${D}/${nonarch_libdir}
300
301 if [ -d "${NPM_BUILD}/bin" ]
302 then
303 install -d ${D}/${bindir}
304 cp --no-preserve=ownership --recursive ${NPM_BUILD}/bin/. ${D}/${bindir}
305 fi
306
307 # If the package (or its dependencies) uses node-gyp to build native addons,
308 # object files, static libraries or other temporary files can be hidden in
309 # the lib directory. To reduce the package size and to avoid QA issues
310 # (staticdev with static library files) these files must be removed.
311 local GYP_REGEX=".*/build/Release/[^/]*.node"
312
313 # Remove any node-gyp directory in ${D} to remove temporary build files
314 for GYP_D_FILE in $(find ${D} -regex "${GYP_REGEX}")
315 do
316 local GYP_D_DIR=${GYP_D_FILE%/Release/*}
317
318 rm --recursive --force ${GYP_D_DIR}
319 done
320
321 # Copy only the node-gyp release files
322 for GYP_B_FILE in $(find ${NPM_BUILD} -regex "${GYP_REGEX}")
323 do
324 local GYP_D_FILE=${D}/${prefix}/${GYP_B_FILE#${NPM_BUILD}}
325
326 install -d ${GYP_D_FILE%/*}
327 install -m 755 ${GYP_B_FILE} ${GYP_D_FILE}
328 done
329
330 # Remove the shrinkwrap file which does not need to be packed
331 rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json
332 rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json
333}
334
335FILES:${PN} += " \
336 ${bindir} \
337 ${nonarch_libdir} \
338"
339
340EXPORT_FUNCTIONS do_configure do_compile do_install
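
# With EXPORT_FUNCTIONS above, a recipe inheriting this class gets
# npm_do_configure/npm_do_compile/npm_do_install as its do_configure,
# do_compile and do_install implementations. A recipe can still extend them
# as usual, e.g. (illustrative only, assuming the package ships a docs
# directory that is not wanted on target):
#
#   do_install:append() {
#       rm -rf ${D}${nonarch_libdir}/node_modules/${BPN}/docs
#   }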
diff --git a/meta/classes-recipe/packagegroup.bbclass b/meta/classes-recipe/packagegroup.bbclass
new file mode 100644
index 0000000000..6f17fc73b0
--- /dev/null
+++ b/meta/classes-recipe/packagegroup.bbclass
@@ -0,0 +1,67 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Class for packagegroup (package group) recipes
8
9# By default, only the packagegroup package itself is in PACKAGES.
10# -dbg and -dev flavours are handled by the anonfunc below.
11# This means that packagegroup recipes used to build multiple packagegroup
12# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
13PACKAGES = "${PN}"
14
15# By default, packagegroup packages do not depend on a certain architecture.
16# Only if dependencies are modified by MACHINE_FEATURES does PACKAGE_ARCH
17# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass
18PACKAGE_ARCH ?= "all"
19
20# Fully expanded - so it applies the overrides as well
21PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
22
23LICENSE ?= "MIT"
24
25inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
26
27# This automatically adds -dbg and -dev flavours of all PACKAGES
28# to the list. Their dependencies (RRECOMMENDS) are handled as usual
29# by package_depchains in a following step.
30# Also mark all packages as ALLOW_EMPTY
31python () {
32 packages = d.getVar('PACKAGES').split()
33 if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY') != '1':
34 types = ['', '-dbg', '-dev']
35 if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
36 types.append('-ptest')
37 packages = [pkg + suffix for pkg in packages
38 for suffix in types]
39 d.setVar('PACKAGES', ' '.join(packages))
40 for pkg in packages:
41 d.setVar('ALLOW_EMPTY:%s' % pkg, '1')
42}
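
# For illustration: with PACKAGES = "packagegroup-foo" and 'ptest' in
# DISTRO_FEATURES, the anonymous function above expands PACKAGES to
#
#   packagegroup-foo packagegroup-foo-dbg packagegroup-foo-dev packagegroup-foo-ptest
#
# and sets ALLOW_EMPTY:<pkg> = "1" for each, since packagegroup packages
# normally contain no files of their own.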
43
44# We don't want to look at shared library dependencies for the
45# dbg packages
46DEPCHAIN_DBGDEFAULTDEPS = "1"
47
48# We only need the packaging tasks - disable the rest
49deltask do_fetch
50deltask do_unpack
51deltask do_patch
52deltask do_configure
53deltask do_compile
54deltask do_install
55deltask do_populate_sysroot
56
57INHIBIT_DEFAULT_DEPS = "1"
58
59python () {
60 if bb.data.inherits_class('nativesdk', d):
61 return
62 initman = d.getVar("VIRTUAL-RUNTIME_init_manager")
63 if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
64 bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
65}
66
67CVE_PRODUCT = ""
diff --git a/meta/classes-recipe/perl-version.bbclass b/meta/classes-recipe/perl-version.bbclass
new file mode 100644
index 0000000000..269ac9eb31
--- /dev/null
+++ b/meta/classes-recipe/perl-version.bbclass
@@ -0,0 +1,72 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PERL_OWN_DIR = ""
8
9# Determine the staged version of perl from the perl configuration file
10# Assign vardepvalue, because otherwise the signature changes before and after
11# perl is built (from None to the real version in config.sh).
12get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
13def get_perl_version(d):
14 import re
15 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
16 try:
17 f = open(cfg, 'r')
18 except IOError:
19 return None
20    l = f.readlines()
21    f.close()
22 r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
23 for s in l:
24 m = r.match(s)
25 if m:
26 return m.group(1)
27 return None
28
29PERLVERSION := "${@get_perl_version(d)}"
30PERLVERSION[vardepvalue] = ""
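
# For illustration: a staged config.sh contains a line such as
#
#   version='5.36.0'
#
# from which get_perl_version() extracts "5.36.0". Before perl has been
# staged the file is absent and the function returns None, which is why the
# vardepvalue pinning above is needed to keep task signatures stable.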
31
32
33# Determine the staged arch of perl from the perl configuration file
34# Assign vardepvalue, because otherwise the signature changes before and after
35# perl is built (from None to the real arch in config.sh).
36def get_perl_arch(d):
37 import re
38 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
39 try:
40 f = open(cfg, 'r')
41 except IOError:
42 return None
43    l = f.readlines()
44    f.close()
45 r = re.compile("^archname='([^']*)'")
46 for s in l:
47 m = r.match(s)
48 if m:
49 return m.group(1)
50 return None
51
52PERLARCH := "${@get_perl_arch(d)}"
53PERLARCH[vardepvalue] = ""
54
55# Determine the staged arch of perl-native from the perl configuration file
56# Assign vardepvalue, because otherwise the signature changes before and after
57# perl is built (from None to the real arch in config.sh).
58def get_perl_hostarch(d):
59 import re
60 cfg = d.expand('${STAGING_LIBDIR_NATIVE}/perl5/config.sh')
61 try:
62 f = open(cfg, 'r')
63 except IOError:
64 return None
65    l = f.readlines()
66    f.close()
67 r = re.compile("^archname='([^']*)'")
68 for s in l:
69 m = r.match(s)
70 if m:
71 return m.group(1)
72 return None
diff --git a/meta/classes-recipe/perlnative.bbclass b/meta/classes-recipe/perlnative.bbclass
new file mode 100644
index 0000000000..d56ec4ae72
--- /dev/null
+++ b/meta/classes-recipe/perlnative.bbclass
@@ -0,0 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7EXTRANATIVEPATH += "perl-native"
8DEPENDS += "perl-native"
9OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes-recipe/pixbufcache.bbclass b/meta/classes-recipe/pixbufcache.bbclass
new file mode 100644
index 0000000000..107e38885e
--- /dev/null
+++ b/meta/classes-recipe/pixbufcache.bbclass
@@ -0,0 +1,69 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class will generate the proper postinst/postrm scriptlets for pixbuf
9# packages.
10#
11
12DEPENDS:append:class-target = " qemu-native"
13inherit qemu
14
15PIXBUF_PACKAGES ??= "${PN}"
16
17PACKAGE_WRITE_DEPS += "qemu-native gdk-pixbuf-native"
18
19pixbufcache_common() {
20if [ "x$D" != "x" ]; then
21 $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} libdir=${libdir} \
22 bindir=${bindir} base_libdir=${base_libdir}
23else
24
25 # Update the pixbuf loaders in case they haven't been registered yet
26 ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
27
28 if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
29 for icondir in /usr/share/icons/*; do
30 if [ -d ${icondir} ]; then
31 gtk-update-icon-cache -t -q ${icondir}
32 fi
33 done
34 fi
35fi
36}
37
38python populate_packages:append() {
39 pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
40
41 for pkg in pixbuf_pkgs:
42 bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
43 postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
44 if not postinst:
45 postinst = '#!/bin/sh\n'
46 postinst += d.getVar('pixbufcache_common')
47 d.setVar('pkg_postinst:%s' % pkg, postinst)
48
49 postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
50 if not postrm:
51 postrm = '#!/bin/sh\n'
52 postrm += d.getVar('pixbufcache_common')
53 d.setVar('pkg_postrm:%s' % pkg, postrm)
54}
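
# For illustration: the generated pkg_postinst for each pixbuf package is
# the pixbufcache_common body prefixed with "#!/bin/sh". During image
# construction ($D set) it queues the update_pixbuf_cache postinst
# intercept; on-target installs run gdk-pixbuf-query-loaders directly.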
55
56gdkpixbuf_complete() {
57GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
58}
59
60DEPENDS:append:class-native = " gdk-pixbuf-native"
61SYSROOT_PREPROCESS_FUNCS:append:class-native = " pixbufcache_sstate_postinst"
62
63pixbufcache_sstate_postinst() {
64 mkdir -p ${SYSROOT_DESTDIR}${bindir}
65 dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
66 echo '#!/bin/sh' > $dest
67 echo "${gdkpixbuf_complete}" >> $dest
68 chmod 0755 $dest
69}
diff --git a/meta/classes-recipe/pkgconfig.bbclass b/meta/classes-recipe/pkgconfig.bbclass
new file mode 100644
index 0000000000..1e1f3824dd
--- /dev/null
+++ b/meta/classes-recipe/pkgconfig.bbclass
@@ -0,0 +1,8 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS:prepend = "pkgconfig-native "
8
diff --git a/meta/classes-recipe/populate_sdk.bbclass b/meta/classes-recipe/populate_sdk.bbclass
new file mode 100644
index 0000000000..caeef5d2b2
--- /dev/null
+++ b/meta/classes-recipe/populate_sdk.bbclass
@@ -0,0 +1,13 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The majority of populate_sdk is located in populate_sdk_base
8# This chunk simply facilitates compatibility with SDK-only recipes.
9
10inherit populate_sdk_base
11
12addtask populate_sdk after do_install before do_build
13
diff --git a/meta/classes-recipe/populate_sdk_base.bbclass b/meta/classes-recipe/populate_sdk_base.bbclass
new file mode 100644
index 0000000000..0be108ad98
--- /dev/null
+++ b/meta/classes-recipe/populate_sdk_base.bbclass
@@ -0,0 +1,384 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PACKAGES = ""
8
9inherit image-postinst-intercepts image-artifact-names
10
11# Wildcards specifying complementary packages to install for every package that has been explicitly
12# installed into the rootfs
13COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
14COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
15COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
16COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
17COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
18COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
19COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
20
21def complementary_globs(featurevar, d):
22 all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
23 globs = []
24 features = set((d.getVar(featurevar) or '').split())
25 for name, glob in all_globs.items():
26 if name in features:
27 globs.append(glob)
28 return ' '.join(globs)
29
30SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
31SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
32SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
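
# For illustration: with SDKIMAGE_FEATURES = "dev-pkgs dbg-pkgs",
# complementary_globs() returns "*-dev *-dbg", so every package explicitly
# installed into the SDK target sysroot also pulls in its -dev and -dbg
# counterparts.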
33
34PACKAGE_ARCHS:append:task-populate-sdk = " sdk-provides-dummy-target"
35SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
36
37# List of locales to install, or "all" for all of them, or unset for none.
38SDKIMAGE_LINGUAS ?= "all"
39
40inherit rootfs_${IMAGE_PKGTYPE}
41
42SDK_DIR = "${WORKDIR}/sdk"
43SDK_OUTPUT = "${SDK_DIR}/image"
44SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
45
46SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
47
48B:task-populate-sdk = "${SDK_DIR}"
49
50SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
51
52SDK_TOOLCHAIN_LANGS ??= ""
53SDK_TOOLCHAIN_LANGS:remove:sdkmingw32 = "rust"
54# libstd-rs doesn't build for mips n32 with compiler constraint errors
55SDK_TOOLCHAIN_LANGS:remove:mipsarchn32 = "rust"
56
57TOOLCHAIN_HOST_TASK ?= " \
58 nativesdk-packagegroup-sdk-host \
59 packagegroup-cross-canadian-${MACHINE} \
60 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'go', 'packagegroup-go-cross-canadian-${MACHINE}', '', d)} \
61 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'rust', 'packagegroup-rust-cross-canadian-${MACHINE}', '', d)} \
62"
63TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
64TOOLCHAIN_TARGET_TASK ?= " \
65 ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
66 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'go', multilib_pkg_extend(d, 'packagegroup-go-sdk-target'), '', d)} \
67 ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'rust', multilib_pkg_extend(d, 'libstd-rs'), '', d)} \
68 target-sdk-provides-dummy \
69"
70TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
71TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
72
73# Default suffix for the archived SDK
74SDK_ARCHIVE_TYPE ?= "tar.xz"
75SDK_XZ_COMPRESSION_LEVEL ?= "-9"
76SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
77
78# Support different SDK archive types according to SDK_ARCHIVE_TYPE; zip and tar.xz are currently supported
79python () {
80 if d.getVar('SDK_ARCHIVE_TYPE') == 'zip':
81 d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
82    # SDK_ARCHIVE_CMD is used to generate the archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from the input dir ${SDK_OUTPUT}/${SDKPATH} into the output dir ${SDKDEPLOYDIR}.
83    # It is recommended to cd into the input dir first so the archive does not contain the build path.
84 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
85 else:
86 d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
87 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
88}
89
90SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
91SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
92PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
93SDK_DEPENDS += "nativesdk-glibc-locale"
94
95# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
96# could be set to the MACHINE_ARCH
97REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
98
99PID = "${@os.getpid()}"
100
101EXCLUDE_FROM_WORLD = "1"
102
103SDK_PACKAGING_FUNC ?= "create_shar"
104SDK_PRE_INSTALL_COMMAND ?= ""
105SDK_POST_INSTALL_COMMAND ?= ""
106SDK_RELOCATE_AFTER_INSTALL ?= "1"
107
108SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk"
109SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
110
111SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
112SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
113SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
114SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
115
116SDK_PRUNE_SYSROOT_DIRS ?= "/dev"
117
118python write_target_sdk_manifest () {
119 from oe.sdk import sdk_list_installed_packages
120 from oe.utils import format_pkg_list
121 sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST"))
122 pkgs = sdk_list_installed_packages(d, True)
123 if not os.path.exists(sdkmanifestdir):
124 bb.utils.mkdirhier(sdkmanifestdir)
125 with open(d.getVar('SDK_TARGET_MANIFEST'), 'w') as output:
126 output.write(format_pkg_list(pkgs, 'ver'))
127}
128
129sdk_prune_dirs () {
130 for d in ${SDK_PRUNE_SYSROOT_DIRS}; do
131 rm -rf ${SDK_OUTPUT}${SDKTARGETSYSROOT}$d
132 done
133}
134
135python write_sdk_test_data() {
136 from oe.data import export2json
137 testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
138 bb.utils.mkdirhier(os.path.dirname(testdata))
139 export2json(d, testdata)
140}
141
142python write_host_sdk_manifest () {
143 from oe.sdk import sdk_list_installed_packages
144 from oe.utils import format_pkg_list
145 sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST"))
146 pkgs = sdk_list_installed_packages(d, False)
147 if not os.path.exists(sdkmanifestdir):
148 bb.utils.mkdirhier(sdkmanifestdir)
149 with open(d.getVar('SDK_HOST_MANIFEST'), 'w') as output:
150 output.write(format_pkg_list(pkgs, 'ver'))
151}
152
153POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data ; "
154POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest; sdk_prune_dirs; "
155POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest; "
156
157SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
158SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
159
160def populate_sdk_common(d):
161 from oe.sdk import populate_sdk
162 from oe.manifest import create_manifest, Manifest
163
164 # Handle package exclusions
165 excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split()
166 inst_pkgs = (d.getVar("PACKAGE_INSTALL") or "").split()
167 inst_attempt_pkgs = (d.getVar("PACKAGE_INSTALL_ATTEMPTONLY") or "").split()
168
169 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
170 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
171
172 for pkg in excl_pkgs:
173 if pkg in inst_pkgs:
174 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
175 inst_pkgs.remove(pkg)
176
177 if pkg in inst_attempt_pkgs:
178            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs))
179 inst_attempt_pkgs.remove(pkg)
180
181 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
182 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
183
184 pn = d.getVar('PN')
185 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
186 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
187
188 ld = bb.data.createCopy(d)
189 ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
190 runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
191 runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
192 d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK"))
193 d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY"))
194
195 # create target/host SDK manifests
196 create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
197 manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
198 create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
199 manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
200
201 populate_sdk(d)
202
203fakeroot python do_populate_sdk() {
204 populate_sdk_common(d)
205}
206SSTATETASKS += "do_populate_sdk"
207SSTATE_SKIP_CREATION:task-populate-sdk = '1'
208do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
209do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
210do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
211do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
212python do_populate_sdk_setscene () {
213 sstate_setscene(d)
214}
215addtask do_populate_sdk_setscene
216
217PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
218
219fakeroot create_sdk_files() {
220 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
221
222 # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
223 # Escape special characters like '+' and '.' in the SDKPATH
224 escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
225 sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
226
227 mkdir -p ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/
228 echo '${SDKPATHNATIVE}${libdir_nativesdk}
229${SDKPATHNATIVE}${base_libdir_nativesdk}
230include /etc/ld.so.conf' > ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ld.so.conf
231}
232
233python check_sdk_sysroots() {
234    # Fail the build if there are broken or dangling symlinks in SDK sysroots
235
236 if d.getVar('CHECK_SDK_SYSROOTS') != '1':
237 # disabled, bail out
238 return
239
240 def norm_path(path):
241 return os.path.abspath(path)
242
243 # Get scan root
244 SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT'),
245 d.getVar('SDKPATH')))
246
247 bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
248
249 def check_symlink(linkPath):
250 if not os.path.islink(linkPath):
251 return
252
253 linkDirPath = os.path.dirname(linkPath)
254
255 targetPath = os.readlink(linkPath)
256 if not os.path.isabs(targetPath):
257 targetPath = os.path.join(linkDirPath, targetPath)
258 targetPath = norm_path(targetPath)
259
260 if SCAN_ROOT != os.path.commonprefix( [SCAN_ROOT, targetPath] ):
261 bb.error("Escaping symlink {0!s} --> {1!s}".format(linkPath, targetPath))
262 return
263
264 if not os.path.exists(targetPath):
265 bb.error("Broken symlink {0!s} --> {1!s}".format(linkPath, targetPath))
266 return
267
268 if os.path.isdir(targetPath):
269 dir_walk(targetPath)
270
271 def walk_error_handler(e):
272 bb.error(str(e))
273
274 def dir_walk(rootDir):
275 for dirPath,subDirEntries,fileEntries in os.walk(rootDir, followlinks=False, onerror=walk_error_handler):
276 entries = subDirEntries + fileEntries
277 for e in entries:
278 ePath = os.path.join(dirPath, e)
279 check_symlink(ePath)
280
281 # start
282 dir_walk(SCAN_ROOT)
283}
284
285SDKTAROPTS = "--owner=root --group=root"
286
287fakeroot archive_sdk() {
288 # Package it up
289 mkdir -p ${SDKDEPLOYDIR}
290 ${SDK_ARCHIVE_CMD}
291}
292
293TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
294TOOLCHAIN_SHAR_REL_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-relocate.sh"
295
296fakeroot create_shar() {
297 # copy in the template shar extractor script
298 cp ${TOOLCHAIN_SHAR_EXT_TMPL} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
299
300 rm -f ${T}/pre_install_command ${T}/post_install_command
301
302 if [ "${SDK_RELOCATE_AFTER_INSTALL}" = "1" ] ; then
303 cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
304 fi
305 cat << "EOF" >> ${T}/pre_install_command
306${SDK_PRE_INSTALL_COMMAND}
307EOF
308
309 cat << "EOF" >> ${T}/post_install_command
310${SDK_POST_INSTALL_COMMAND}
311EOF
312 sed -i -e '/@SDK_PRE_INSTALL_COMMAND@/r ${T}/pre_install_command' \
313 -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' \
314 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
315
316 # substitute variables
317 sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
318 -e 's#@SDKPATH@#${SDKPATH}#g' \
319 -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \
320 -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
321 -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
322 -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
323 -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\\&')}#g' \
324 -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
325 -e '/@SDK_PRE_INSTALL_COMMAND@/d' \
326 -e '/@SDK_POST_INSTALL_COMMAND@/d' \
327 -e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
328 -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
329 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
330
331 # add execution permission
332 chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
333
334 # append the SDK tarball
335 cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
336
337 # delete the old tarball, we don't need it anymore
338 rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}
339}
340
341populate_sdk_log_check() {
342 for target in $*
343 do
344 lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
345
346 echo "log_check: Using $lf_path as logfile"
347
348 if [ -e "$lf_path" ]; then
349 ${IMAGE_PKGTYPE}_log_check $target $lf_path
350 else
351 echo "Cannot find logfile [$lf_path]"
352 fi
353 echo "Logfile is clean"
354 done
355}
356
357def sdk_command_variables(d):
358 return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_PRE_TARGET_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS']
359
360def sdk_variables(d):
361 variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
362 'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
363 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'IMAGE_INSTALL_DEBUGFS']
364 variables.extend(sdk_command_variables(d))
365 return " ".join(variables)
366
367do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
368
369python () {
370 variables = sdk_command_variables(d)
371 for var in variables:
372 if d.getVar(var, False):
373 d.setVarFlag(var, 'func', '1')
374}
375
376do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
377 ${TOOLCHAIN_SHAR_EXT_TMPL}:True"
378
379do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
380do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
381do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
382do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
383do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
384addtask populate_sdk
diff --git a/meta/classes-recipe/populate_sdk_ext.bbclass b/meta/classes-recipe/populate_sdk_ext.bbclass
new file mode 100644
index 0000000000..56e24c4eed
--- /dev/null
+++ b/meta/classes-recipe/populate_sdk_ext.bbclass
@@ -0,0 +1,842 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Extensible SDK
8
9inherit populate_sdk_base
10
11# Used to override TOOLCHAIN_HOST_TASK in the eSDK case
12TOOLCHAIN_HOST_TASK_ESDK = " \
13 meta-environment-extsdk-${MACHINE} \
14 "
15
16SDK_RELOCATE_AFTER_INSTALL:task-populate-sdk-ext = "0"
17
18SDK_EXT = ""
19SDK_EXT:task-populate-sdk-ext = "-ext"
20
21# Options are full or minimal
22SDK_EXT_TYPE ?= "full"
23SDK_INCLUDE_PKGDATA ?= "0"
24SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
25SDK_INCLUDE_NATIVESDK ?= "0"
26SDK_INCLUDE_BUILDTOOLS ?= '1'
27
28SDK_RECRDEP_TASKS ?= ""
29SDK_CUSTOM_TEMPLATECONF ?= "0"
30
31ESDK_LOCALCONF_ALLOW ?= ""
32ESDK_LOCALCONF_REMOVE ?= "CONF_VERSION \
33 BB_NUMBER_THREADS \
34 BB_NUMBER_PARSE_THREADS \
35 PARALLEL_MAKE \
36 PRSERV_HOST \
37 SSTATE_MIRRORS \
38 DL_DIR \
39 SSTATE_DIR \
40 TMPDIR \
41 BB_SERVER_TIMEOUT \
42 "
43ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory icecc"
44SDK_UPDATE_URL ?= ""
45
46SDK_TARGETS ?= "${PN}"
47
48def get_sdk_install_targets(d, images_only=False):
49 sdk_install_targets = ''
50 if images_only or d.getVar('SDK_EXT_TYPE') != 'minimal':
51 sdk_install_targets = d.getVar('SDK_TARGETS')
52
53 depd = d.getVar('BB_TASKDEPDATA', False)
54 tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
55 tasklist.remove('do_build')
56 for v in depd.values():
57 if v[1] in tasklist:
58 if v[0] not in sdk_install_targets:
59 sdk_install_targets += ' {}'.format(v[0])
60
61 if not images_only:
62 if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
63 sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
64 if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1':
65 sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot'
66
67 return sdk_install_targets
68
69get_sdk_install_targets[vardepsexclude] = "BB_TASKDEPDATA"
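
# For illustration: with SDK_EXT_TYPE = "minimal" only the image targets
# discovered via BB_TASKDEPDATA are returned; with the default "full" type
# the list starts from SDK_TARGETS (normally ${PN}) and, depending on
# SDK_INCLUDE_PKGDATA and SDK_INCLUDE_TOOLCHAIN, additionally gains
# meta-world-pkgdata:do_allpackagedata and
# meta-extsdk-toolchain:do_populate_sysroot.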
70
71OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
72
73# The files from COREBASE that you want preserved in the COREBASE copied
74# into the sdk. This allows someone's own setup scripts in COREBASE to be
75# preserved, as well as untracked files.
76COREBASE_FILES ?= " \
77 oe-init-build-env \
78 scripts \
79 LICENSE \
80 .templateconf \
81"
82
83SDK_DIR:task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
84B:task-populate-sdk-ext = "${SDK_DIR}"
85TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
86TOOLCHAIN_OUTPUTNAME:task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
87
88SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
89SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
90
91python write_target_sdk_ext_manifest () {
92 from oe.sdk import get_extra_sdkinfo
93 sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
94 extra_info = get_extra_sdkinfo(sstate_dir)
95
96 target = d.getVar('TARGET_SYS')
97 target_multimach = d.getVar('MULTIMACH_TARGET_SYS')
98 real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
99
100 pkgs = {}
101 os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True)
102 with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
103 for fn in extra_info['filesizes']:
104 info = fn.split(':')
105 if info[2] in (target, target_multimach, real_target_multimach) \
106 or info[5] == 'allarch':
107 if not info[1] in pkgs:
108 f.write("%s %s %s\n" % (info[1], info[2], info[3]))
109 pkgs[info[1]] = {}
110}
111python write_host_sdk_ext_manifest () {
112 from oe.sdk import get_extra_sdkinfo
113 sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
114 extra_info = get_extra_sdkinfo(sstate_dir)
115 host = d.getVar('BUILD_SYS')
116 with open(d.getVar('SDK_EXT_HOST_MANIFEST'), 'w') as f:
117 for fn in extra_info['filesizes']:
118 info = fn.split(':')
119 if info[2] == host:
120 f.write("%s %s %s\n" % (info[1], info[2], info[3]))
121}
122
123SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
124
125SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
126
127def clean_esdk_builddir(d, sdkbasepath):
128 """Clean up traces of the fake build for create_filtered_tasklist()"""
129 import shutil
130 cleanpaths = ['cache', 'tmp']
131 for pth in cleanpaths:
132 fullpth = os.path.join(sdkbasepath, pth)
133 if os.path.isdir(fullpth):
134 shutil.rmtree(fullpth)
135 elif os.path.isfile(fullpth):
136 os.remove(fullpth)
137
138def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
139 """
140 Create a filtered list of tasks. Also double-checks that the build system
141 within the SDK basically works and required sstate artifacts are available.
142 """
143 import tempfile
144 import shutil
145 import oe.copy_buildsystem
146
147 # Create a temporary build directory that we can pass to the env setup script
148 shutil.copyfile(sdkbasepath + '/conf/local.conf', sdkbasepath + '/conf/local.conf.bak')
149 try:
150 with open(sdkbasepath + '/conf/local.conf', 'a') as f:
151 # Force the use of sstate from the build system
152 f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
153 f.write('SSTATE_MIRRORS:forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
154 # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
155 f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n')
156 f.write('TCLIBCAPPEND:forcevariable = ""\n')
157 # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
158 # be different and we won't be able to find our native sstate)
159 if not bb.data.inherits_class('uninative', d):
160 f.write('INHERIT:remove = "uninative"\n')
161
162 # Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
163 # will not allow in its COREBASE path, so we need to rename the directory temporarily
164 temp_sdkbasepath = d.getVar('SDK_OUTPUT') + '/tmp-renamed-sdk'
165 # Delete any existing temp dir
166 try:
167 shutil.rmtree(temp_sdkbasepath)
168 except FileNotFoundError:
169 pass
170 bb.utils.rename(sdkbasepath, temp_sdkbasepath)
171 cmdprefix = '. %s .; ' % conf_initpath
172 logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
173 try:
174 oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
175 except bb.process.ExecutionError as e:
176 msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
177 if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
178 msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
179 bb.fatal(msg)
180 bb.utils.rename(temp_sdkbasepath, sdkbasepath)
181        # Clean out the residue of running bitbake, which
182        # check_sstate_task_list() effectively does
183 clean_esdk_builddir(d, sdkbasepath)
184 finally:
185 localconf = sdkbasepath + '/conf/local.conf'
186 if os.path.exists(localconf + '.bak'):
187 os.replace(localconf + '.bak', localconf)
188
189python copy_buildsystem () {
190 import re
191 import shutil
192 import glob
193 import oe.copy_buildsystem
194
195 oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT')
196
197 conf_bbpath = ''
198 conf_initpath = ''
199 core_meta_subdir = ''
200
201 # Copy in all metadata layers + bitbake (as repositories)
202 buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
203 baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
204
205    # Check if a custom templateconf path is set
206 use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
207
208 # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
209 derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
210 if derivative:
211 workspace_name = 'orig-workspace'
212 else:
213 workspace_name = None
214
215 corebase, sdkbblayers = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
216 conf_bbpath = os.path.join('layers', corebase, 'bitbake')
217
218 for path in os.listdir(baseoutpath + '/layers'):
219 relpath = os.path.join('layers', path, oe_init_env_script)
220 if os.path.exists(os.path.join(baseoutpath, relpath)):
221 conf_initpath = relpath
222
223 relpath = os.path.join('layers', path, 'scripts', 'devtool')
224 if os.path.exists(os.path.join(baseoutpath, relpath)):
225 scriptrelpath = os.path.dirname(relpath)
226
227 relpath = os.path.join('layers', path, 'meta')
228 if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
229 core_meta_subdir = relpath
230
231 d.setVar('oe_init_build_env_path', conf_initpath)
232 d.setVar('scriptrelpath', scriptrelpath)
233
234 # Write out config file for devtool
235 import configparser
236    config = configparser.ConfigParser()
237 config.add_section('General')
238 config.set('General', 'bitbake_subdir', conf_bbpath)
239 config.set('General', 'init_path', conf_initpath)
240 config.set('General', 'core_meta_subdir', core_meta_subdir)
241 config.add_section('SDK')
242 config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS'))
243 updateurl = d.getVar('SDK_UPDATE_URL')
244 if updateurl:
245 config.set('SDK', 'updateserver', updateurl)
246 bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
247 with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
248 config.write(f)
249
250 unlockedsigs = os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc')
251 with open(unlockedsigs, 'w') as f:
252 pass
253
254 # Create a layer for new recipes / appends
255 bbpath = d.getVar('BBPATH')
256 env = os.environ.copy()
257 env['PYTHONDONTWRITEBYTECODE'] = '1'
258 bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')], env=env)
259
260 # Create bblayers.conf
261 bb.utils.mkdirhier(baseoutpath + '/conf')
262 with open(baseoutpath + '/conf/bblayers.conf', 'w') as f:
263 f.write('# WARNING: this configuration has been automatically generated and in\n')
264 f.write('# most cases should not be edited. If you need more flexibility than\n')
265 f.write('# this configuration provides, it is strongly suggested that you set\n')
266 f.write('# up a proper instance of the full build system and use that instead.\n\n')
267
268 # LCONF_VERSION may not be set, for example when using meta-poky
269 # so don't error if it isn't found
270 lconf_version = d.getVar('LCONF_VERSION', False)
271 if lconf_version is not None:
272 f.write('LCONF_VERSION = "%s"\n\n' % lconf_version)
273
274 f.write('BBPATH = "$' + '{TOPDIR}"\n')
275 f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
276 f.write('BBLAYERS := " \\\n')
277 for layerrelpath in sdkbblayers:
278 f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath)
279 f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
280 f.write(' "\n')
281
282 # Copy uninative tarball
283 # For now this is where uninative.bbclass expects the tarball
284 if bb.data.inherits_class('uninative', d):
285 uninative_file = d.expand('${UNINATIVE_DLDIR}/' + d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH")) + '/${UNINATIVE_TARBALL}')
286 uninative_checksum = bb.utils.sha256_file(uninative_file)
287 uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
288 bb.utils.mkdirhier(uninative_outdir)
289 shutil.copy(uninative_file, uninative_outdir)
290
291 env_passthrough = (d.getVar('BB_ENV_PASSTHROUGH_ADDITIONS') or '').split()
292 env_passthrough_values = {}
293
294 # Create local.conf
295 builddir = d.getVar('TOPDIR')
296 if derivative and os.path.exists(builddir + '/conf/site.conf'):
297 shutil.copyfile(builddir + '/conf/site.conf', baseoutpath + '/conf/site.conf')
298 if derivative and os.path.exists(builddir + '/conf/auto.conf'):
299 shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
300 if derivative:
301 shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
302 else:
303 local_conf_allowed = (d.getVar('ESDK_LOCALCONF_ALLOW') or '').split()
304 local_conf_remove = (d.getVar('ESDK_LOCALCONF_REMOVE') or '').split()
305 def handle_var(varname, origvalue, op, newlines):
306 if varname in local_conf_remove or (origvalue.strip().startswith('/') and not varname in local_conf_allowed):
307 newlines.append('# Removed original setting of %s\n' % varname)
308 return None, op, 0, True
309 else:
310 if varname in env_passthrough:
311 env_passthrough_values[varname] = origvalue
312 return origvalue, op, 0, True
313 varlist = ['[^#=+ ]*']
314 oldlines = []
315 if os.path.exists(builddir + '/conf/site.conf'):
316 with open(builddir + '/conf/site.conf', 'r') as f:
317 oldlines += f.readlines()
318 if os.path.exists(builddir + '/conf/auto.conf'):
319 with open(builddir + '/conf/auto.conf', 'r') as f:
320 oldlines += f.readlines()
321 if os.path.exists(builddir + '/conf/local.conf'):
322 with open(builddir + '/conf/local.conf', 'r') as f:
323 oldlines += f.readlines()
324 (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
325
326 with open(baseoutpath + '/conf/local.conf', 'w') as f:
327 f.write('# WARNING: this configuration has been automatically generated and in\n')
328 f.write('# most cases should not be edited. If you need more flexibility than\n')
329 f.write('# this configuration provides, it is strongly suggested that you set\n')
330 f.write('# up a proper instance of the full build system and use that instead.\n\n')
331 for line in newlines:
332 if line.strip() and not line.startswith('#'):
333 f.write(line)
334 # Write a newline just in case there's none at the end of the original
335 f.write('\n')
336
337 f.write('TMPDIR = "${TOPDIR}/tmp"\n')
338 f.write('TCLIBCAPPEND = ""\n')
339 f.write('DL_DIR = "${TOPDIR}/downloads"\n')
340
341 if bb.data.inherits_class('uninative', d):
342 f.write('INHERIT += "%s"\n' % 'uninative')
343 f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
344 f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
345
346 # Some classes are not suitable for SDK, remove them from INHERIT
347 f.write('INHERIT:remove = "%s"\n' % d.getVar('ESDK_CLASS_INHERIT_DISABLE', False))
348
349 # Bypass the default connectivity check if any
350 f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
351
352	# This warning is emitted if reverse dependencies for a task
353 # don't have sstate as well as the task itself. We already know
354 # this will be the case for the extensible sdk, so turn off the
355 # warning.
356 f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n')
357
358 # Warn if the sigs in the locked-signature file don't match
359 # the sig computed from the metadata.
360 f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
361
362 # We want to be able to set this without a full reparse
363 f.write('BB_HASHCONFIG_IGNORE_VARS:append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
364
365	# Set up which tasks are ignored when running on install
366 f.write('BB_SETSCENE_ENFORCE_IGNORE_TASKS = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
367
368 # Hide the config information from bitbake output (since it's fixed within the SDK)
369 f.write('BUILDCFG_HEADER = ""\n\n')
370
371 # Write METADATA_REVISION
372 f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
373
374 f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
375 f.write('WITHIN_EXT_SDK = "1"\n\n')
376
377 # Map gcc-dependent uninative sstate cache for installer usage
378 f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
379
380 if d.getVar("PRSERV_HOST"):
381	# Override this, we now include PR data, so it should only point to the local database
382 f.write('PRSERV_HOST = "localhost:0"\n\n')
383
384 # Allow additional config through sdk-extra.conf
385 fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
386 if fn:
387 with open(fn, 'r') as xf:
388 for line in xf:
389 f.write(line)
390
391 # If you define a sdk_extraconf() function then it can contain additional config
392 # (Though this is awkward; sdk-extra.conf should probably be used instead)
393 extraconf = (d.getVar('sdk_extraconf') or '').strip()
394 if extraconf:
395 # Strip off any leading / trailing spaces
396 for line in extraconf.splitlines():
397 f.write(line.strip() + '\n')
398
399 f.write('require conf/locked-sigs.inc\n')
400 f.write('require conf/unlocked-sigs.inc\n')
401
402	# Copy multiple configurations if they exist in the user's config directory
403 if d.getVar('BBMULTICONFIG') is not None:
404 bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf', 'multiconfig'))
405 for mc in d.getVar('BBMULTICONFIG').split():
406 dest_stub = "/conf/multiconfig/%s.conf" % (mc,)
407 if os.path.exists(builddir + dest_stub):
408 shutil.copyfile(builddir + dest_stub, baseoutpath + dest_stub)
409
410 cachedir = os.path.join(baseoutpath, 'cache')
411 bb.utils.mkdirhier(cachedir)
412 bb.parse.siggen.copy_unitaskhashes(cachedir)
413
414 # If PR Service is in use, we need to export this as well
415 bb.note('Do we have a pr database?')
416 if d.getVar("PRSERV_HOST"):
417 bb.note('Writing PR database...')
418 # Based on the code in classes/prexport.bbclass
419 import oe.prservice
420 #dump meta info of tables
421 localdata = d.createCopy()
422 localdata.setVar('PRSERV_DUMPOPT_COL', "1")
423 localdata.setVar('PRSERV_DUMPDIR', os.path.join(baseoutpath, 'conf'))
424 localdata.setVar('PRSERV_DUMPFILE', '${PRSERV_DUMPDIR}/prserv.inc')
425
426	bb.note('Writing PR database to %s' % (localdata.getVar('PRSERV_DUMPFILE')))
427
428 retval = oe.prservice.prserv_dump_db(localdata)
429 if not retval:
430 bb.error("prexport_handler: export failed!")
431 return
432 (metainfo, datainfo) = retval
433 oe.prservice.prserv_export_tofile(localdata, metainfo, datainfo, True)
434
435	# Use the templateconf.cfg file from builddir if it exists
436 if os.path.exists(builddir + '/conf/templateconf.cfg') and use_custom_templateconf == '1':
437 shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg')
438 else:
439 # Write a templateconf.cfg
440 with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
441 f.write('meta/conf\n')
442
443 # Ensure any variables set from the external environment (by way of
444 # BB_ENV_PASSTHROUGH_ADDITIONS) are set in the SDK's configuration
445 extralines = []
446 for name, value in env_passthrough_values.items():
447 actualvalue = d.getVar(name) or ''
448 if value != actualvalue:
449 extralines.append('%s = "%s"\n' % (name, actualvalue))
450 if extralines:
451 with open(baseoutpath + '/conf/local.conf', 'a') as f:
452 f.write('\n')
453 f.write('# Extra settings from environment:\n')
454 for line in extralines:
455 f.write(line)
456 f.write('\n')
457
458 # Filter the locked signatures file to just the sstate tasks we are interested in
459 excluded_targets = get_sdk_install_targets(d, images_only=True)
460 sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
461 lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
462 #nativesdk-only sigfile to merge into locked-sigs.inc
463 sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
464 nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
465 nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
466
467 if sdk_include_nativesdk:
468 oe.copy_buildsystem.prune_lockedsigs([],
469 excluded_targets.split(),
470 nativesigfile,
471 True,
472 nativesigfile_pruned)
473
474 oe.copy_buildsystem.merge_lockedsigs([],
475 sigfile,
476 nativesigfile_pruned,
477 sigfile)
478
479 oe.copy_buildsystem.prune_lockedsigs([],
480 excluded_targets.split(),
481 sigfile,
482 False,
483 lockedsigs_pruned)
484
485 sstate_out = baseoutpath + '/sstate-cache'
486 bb.utils.remove(sstate_out, True)
487
488 # uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d)
489 fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d)
490
491 sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
492 sdk_ext_type = d.getVar('SDK_EXT_TYPE')
493 if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
494 # Create the filtered task list used to generate the sstate cache shipped with the SDK
495 tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
496 create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
497 else:
498 tasklistfn = None
499
500
501 cachedir = os.path.join(baseoutpath, 'cache')
502 bb.utils.mkdirhier(cachedir)
503 bb.parse.siggen.copy_unitaskhashes(cachedir)
504
505 # Add packagedata if enabled
506 if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
507 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
508 lockedsigs_copy = d.getVar('WORKDIR') + '/locked-sigs-copy.inc'
509 shutil.move(lockedsigs_pruned, lockedsigs_base)
510 oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
511 lockedsigs_base,
512 d.getVar('STAGING_DIR_HOST') + '/world-pkgdata/locked-sigs-pkgdata.inc',
513 lockedsigs_pruned,
514 lockedsigs_copy)
515
516 if sdk_include_toolchain:
517 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base2.inc'
518 lockedsigs_toolchain = d.expand("${STAGING_DIR}/${TUNE_PKGARCH}/meta-extsdk-toolchain/locked-sigs/locked-sigs-extsdk-toolchain.inc")
519 shutil.move(lockedsigs_pruned, lockedsigs_base)
520 oe.copy_buildsystem.merge_lockedsigs([],
521 lockedsigs_base,
522 lockedsigs_toolchain,
523 lockedsigs_pruned)
524 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain,
525 d.getVar('SSTATE_DIR'),
526 sstate_out, d,
527 fixedlsbstring,
528 filterfile=tasklistfn)
529
530 if sdk_ext_type == 'minimal':
531 if derivative:
532 # Assume the user is not going to set up an additional sstate
533 # mirror, thus we need to copy the additional artifacts (from
534 # workspace recipes) into the derivative SDK
535 lockedsigs_orig = d.getVar('TOPDIR') + '/conf/locked-sigs.inc'
536 if os.path.exists(lockedsigs_orig):
537 lockedsigs_extra = d.getVar('WORKDIR') + '/locked-sigs-extra.inc'
538 oe.copy_buildsystem.merge_lockedsigs(None,
539 lockedsigs_orig,
540 lockedsigs_pruned,
541 None,
542 lockedsigs_extra)
543 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
544 d.getVar('SSTATE_DIR'),
545 sstate_out, d,
546 fixedlsbstring,
547 filterfile=tasklistfn)
548 else:
549 oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
550 d.getVar('SSTATE_DIR'),
551 sstate_out, d,
552 fixedlsbstring,
553 filterfile=tasklistfn)
554
555 # We don't need sstate do_package files
556 for root, dirs, files in os.walk(sstate_out):
557 for name in files:
558 if name.endswith("_package.tar.zst"):
559 f = os.path.join(root, name)
560 os.remove(f)
561
562 # Write manifest file
563 # Note: at the moment we cannot include the env setup script here to keep
564 # it updated, since it gets modified during SDK installation (see
565	# sdk_ext_postinst() below), so the checksum we take here would always
566 # be different.
567 manifest_file_list = ['conf/*']
568 if d.getVar('BBMULTICONFIG') is not None:
569 manifest_file_list.append('conf/multiconfig/*')
570
571 esdk_manifest_excludes = (d.getVar('ESDK_MANIFEST_EXCLUDES') or '').split()
572 esdk_manifest_excludes_list = []
573 for exclude_item in esdk_manifest_excludes:
574 esdk_manifest_excludes_list += glob.glob(os.path.join(baseoutpath, exclude_item))
575 manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
576 with open(manifest_file, 'w') as f:
577 for item in manifest_file_list:
578 for fn in glob.glob(os.path.join(baseoutpath, item)):
579 if fn == manifest_file or os.path.isdir(fn):
580 continue
581 if fn in esdk_manifest_excludes_list:
582 continue
583 chksum = bb.utils.sha256_file(fn)
584 f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
585}
586
587def get_current_buildtools(d):
588 """Get the file name of the current buildtools installer"""
589 import glob
590 btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY'), '*-buildtools-nativesdk-standalone-*.sh'))
591 btfiles.sort(key=os.path.getctime)
592 return os.path.basename(btfiles[-1])
593
594def get_sdk_required_utilities(buildtools_fn, d):
595 """Find required utilities that aren't provided by the buildtools"""
596 sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
597 sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
598 sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
599 if buildtools_fn:
600 buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
601 filelist, _ = bb.process.run('%s -l' % buildtools_installer)
602 else:
603 buildtools_installer = None
604 filelist = ""
605 localdata = bb.data.createCopy(d)
606 localdata.setVar('SDKPATH', '.')
607 sdkpathnative = localdata.getVar('SDKPATHNATIVE')
608 sdkbindirs = [localdata.getVar('bindir_nativesdk'),
609 localdata.getVar('sbindir_nativesdk'),
610 localdata.getVar('base_bindir_nativesdk'),
611 localdata.getVar('base_sbindir_nativesdk')]
612 for line in filelist.splitlines():
613 splitline = line.split()
614 if len(splitline) > 5:
615 fn = splitline[5]
616 if not fn.startswith('./'):
617 fn = './%s' % fn
618 if fn.startswith(sdkpathnative):
619 relpth = '/' + os.path.relpath(fn, sdkpathnative)
620 for bindir in sdkbindirs:
621 if relpth.startswith(bindir):
622 relpth = os.path.relpath(relpth, bindir)
623 if relpth in sanity_required_utilities:
624 sanity_required_utilities.remove(relpth)
625 break
626 return ' '.join(sanity_required_utilities)
627
628install_tools() {
629 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
630 scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
631 for script in $scripts; do
632 for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
633 targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
634 test -e ${targetscriptfn} || ln -rs ${scriptfn} ${targetscriptfn}
635 done
636 done
637 # We can't use the same method as above because files in the sysroot won't exist at this point
638 # (they get populated from sstate on installation)
639 unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
640 if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
641 binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
642 ln -rs ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
643 fi
644 touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
645
646 # find latest buildtools-tarball and install it
647 if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
648 install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
649 fi
650
651 install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
652}
653do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True"
654
655sdk_ext_preinst() {
656	# Since bitbake won't run as root, it doesn't make sense to try to install
657 # the extensible sdk as root.
658 if [ "`id -u`" = "0" ]; then
659 echo "ERROR: The extensible sdk cannot be installed as root."
660 exit 1
661 fi
662 if ! command -v locale > /dev/null; then
663 echo "ERROR: The installer requires the locale command, please install it first"
664 exit 1
665 fi
666 # Check setting of LC_ALL set above
667 canonicalised_locale=`echo $LC_ALL | sed 's/UTF-8/utf8/'`
668 if ! locale -a | grep -q $canonicalised_locale ; then
669 echo "ERROR: the installer requires the $LC_ALL locale to be installed (but not selected), please install it first"
670 exit 1
671 fi
672 # The relocation script used by buildtools installer requires python
673 if ! command -v python3 > /dev/null; then
674 echo "ERROR: The installer requires python3, please install it first"
675 exit 1
676 fi
677 missing_utils=""
678 for util in ${SDK_REQUIRED_UTILITIES}; do
679 if ! command -v $util > /dev/null; then
680 missing_utils="$missing_utils $util"
681 fi
682 done
683 if [ -n "$missing_utils" ] ; then
684 echo "ERROR: the SDK requires the following missing utilities, please install them: $missing_utils"
685 exit 1
686 fi
687 SDK_EXTENSIBLE="1"
688 if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
689 EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
690 fi
691}
692SDK_PRE_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_preinst}"
693
694# FIXME this preparation should be done as part of the SDK construction
695sdk_ext_postinst() {
696 printf "\nExtracting buildtools...\n"
697 cd $target_sdk_dir
698 env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
699 if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
700 printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
701
702 # Delete the buildtools tar file since it won't be used again
703 rm -f ./${SDK_BUILDTOOLS_INSTALLER}
704 # We don't need the log either since it succeeded
705 rm -f buildtools.log
706
707 # Make sure when the user sets up the environment, they also get
708 # the buildtools-tarball tools in their path.
709 echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script
710 echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script
711 echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
712 echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script
713 fi
714
715	# Allow bitbake environment setup to be run as part of this SDK.
716 echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
717 # Work around runqemu not knowing how to get this information within the eSDK
718 echo "export DEPLOY_DIR_IMAGE=$target_sdk_dir/tmp/${@os.path.relpath(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('TMPDIR'))}" >> $env_setup_script
719
720	# Another small hack: we need this in the path only for devtool,
721	# so put it at the end of $PATH.
722 echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
723
724 echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
725
726 # Warn if trying to use external bitbake and the ext SDK together
727 echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
728
729 if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
730 printf "Preparing build system...\n"
731	# dash, which is /bin/sh on Ubuntu, will not preserve the
732	# current working directory when first run, nor will it set $1 when
733 # sourcing a script. That is why this has to look so ugly.
734 LOGFILE="$target_sdk_dir/preparing_build_system.log"
735 sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
736 fi
737 if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
738 rm $target_sdk_dir/ext-sdk-prepare.py
739 fi
740 echo done
741}
742
743SDK_POST_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_postinst}"
744
745SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem; install_tools; "
746
747SDK_INSTALL_TARGETS = ""
748fakeroot python do_populate_sdk_ext() {
749 # FIXME hopefully we can remove this restriction at some point, but uninative
750 # currently forces this upon us
751 if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
752 bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
753
754 # FIXME hopefully we can remove this restriction at some point, but the eSDK
755 # can only be built for the primary (default) multiconfig
756 if d.getVar('BB_CURRENT_MC') != 'default':
757 bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC'))
758
759 # eSDK dependencies don't use the traditional variables and things don't work properly if they are set
760 d.setVar("TOOLCHAIN_HOST_TASK", "${TOOLCHAIN_HOST_TASK_ESDK}")
761 d.setVar("TOOLCHAIN_TARGET_TASK", "")
762
763 d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
764 if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
765 buildtools_fn = get_current_buildtools(d)
766 else:
767 buildtools_fn = None
768 d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
769 d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
770 d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
771 # ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
772 d.delVar('SDKIMAGE_LINGUAS')
773 if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
774 generate_nativesdk_lockedsigs(d)
775 populate_sdk_common(d)
776}
777
778def generate_nativesdk_lockedsigs(d):
779 import oe.copy_buildsystem
780 sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
781 oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
782
783def get_ext_sdk_depends(d):
784 # Note: the deps varflag is a list not a string, so we need to specify expand=False
785 deps = d.getVarFlag('do_image_complete', 'deps', False)
786 pn = d.getVar('PN')
787 deplist = ['%s:%s' % (pn, dep) for dep in deps]
788 tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
789 tasklist.append('do_rootfs')
790 for task in tasklist:
791 deplist.extend((d.getVarFlag(task, 'depends') or '').split())
792 return ' '.join(deplist)
793
794python do_sdk_depends() {
795 # We have to do this separately in its own task so we avoid recursing into
796 # dependencies we don't need to (e.g. buildtools-tarball) and bringing those
797 # into the SDK's sstate-cache
798 import oe.copy_buildsystem
799 sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
800 oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
801}
802addtask sdk_depends
803
804do_sdk_depends[dirs] = "${WORKDIR}"
805do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
806do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
807do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
808do_sdk_depends[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('TOOLCHAIN_HOST_TASK_ESDK').split()])}"
809
810do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
811
812do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
813 ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \
814 ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
815 ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
816
817# We must avoid depending on do_build here if rm_work.bbclass is active,
818# because otherwise do_rm_work may run before do_populate_sdk_ext itself.
819# We can't mark do_populate_sdk_ext and do_sdk_depends as having to
820# run before do_rm_work, because then they would also run as part
821# of normal builds.
822do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':' + (d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build') for x in d.getVar('SDK_TARGETS').split()])}"
823
824# Make sure code changes can result in rebuild
825do_populate_sdk_ext[vardeps] += "copy_buildsystem \
826 sdk_ext_postinst"
827
828# Since any change in the metadata of any layer should cause a rebuild of the
829# sdk (since the layers are put in the sdk), set the task to nostamp so it
830# always runs.
831do_populate_sdk_ext[nostamp] = "1"
832
833SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
834
835SSTATETASKS += "do_populate_sdk_ext"
836SSTATE_SKIP_CREATION:task-populate-sdk-ext = '1'
837do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
838do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
839do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
840do_populate_sdk_ext[stamp-extra-info] = "${MACHINE_ARCH}"
841
842addtask populate_sdk_ext after do_sdk_depends
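
For orientation, the result of this class is a self-extracting eSDK installer
under ${SDK_DEPLOY}. A hedged sketch of how it is then consumed (the installer
name and install directory below are placeholders):

    $ ./<image>-glibc-<arch>-toolchain-ext-<version>.sh -d ~/my-esdk
    $ . ~/my-esdk/environment-setup-*
    $ devtool add <recipe> <source-tree>

sdk_ext_preinst() above runs before extraction and sdk_ext_postinst() after,
which is when the buildtools are unpacked and the build system is prepared.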
diff --git a/meta/classes-recipe/ptest-gnome.bbclass b/meta/classes-recipe/ptest-gnome.bbclass
new file mode 100644
index 0000000000..d4ad22d85d
--- /dev/null
+++ b/meta/classes-recipe/ptest-gnome.bbclass
@@ -0,0 +1,14 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit ptest
8
9EXTRA_OECONF:append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
10
11FILES:${PN}-ptest += "${libexecdir}/installed-tests/ \
12 ${datadir}/installed-tests/"
13
14RDEPENDS:${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes-recipe/ptest-perl.bbclass b/meta/classes-recipe/ptest-perl.bbclass
new file mode 100644
index 0000000000..c283fdd1fc
--- /dev/null
+++ b/meta/classes-recipe/ptest-perl.bbclass
@@ -0,0 +1,36 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit ptest
8
9FILESEXTRAPATHS:prepend := "${COREBASE}/meta/files:"
10
11SRC_URI += "file://ptest-perl/run-ptest"
12
13do_install_ptest_perl() {
14 install -d ${D}${PTEST_PATH}
15 if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
16 install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
17 fi
18 cp -r ${B}/t ${D}${PTEST_PATH}
19 chown -R root:root ${D}${PTEST_PATH}
20}
21
22FILES:${PN}-ptest:prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
23
24RDEPENDS:${PN}-ptest:prepend = "perl "
25
26addtask install_ptest_perl after do_install_ptest_base before do_package
27
28python () {
29 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
30 d.setVarFlag('do_install_ptest_perl', 'fakeroot', '1')
31
32 # Remove all '*ptest_perl' tasks when ptest is not enabled
33 if not(d.getVar('PTEST_ENABLED') == "1"):
34 for i in ['do_install_ptest_perl']:
35 bb.build.deltask(i, d)
36}
diff --git a/meta/classes-recipe/ptest.bbclass b/meta/classes-recipe/ptest.bbclass
new file mode 100644
index 0000000000..0383206a6d
--- /dev/null
+++ b/meta/classes-recipe/ptest.bbclass
@@ -0,0 +1,142 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SUMMARY:${PN}-ptest ?= "${SUMMARY} - Package test files"
8DESCRIPTION:${PN}-ptest ?= "${DESCRIPTION} \
9This package contains a test directory ${PTEST_PATH} for package test purposes."
10
11PTEST_PATH ?= "${libdir}/${BPN}/ptest"
12PTEST_BUILD_HOST_FILES ?= "Makefile"
13PTEST_BUILD_HOST_PATTERN ?= ""
14PTEST_PARALLEL_MAKE ?= "${PARALLEL_MAKE}"
15PTEST_PARALLEL_MAKEINST ?= "${PARALLEL_MAKEINST}"
16EXTRA_OEMAKE:prepend:task-compile-ptest-base = "${PTEST_PARALLEL_MAKE} "
17EXTRA_OEMAKE:prepend:task-install-ptest-base = "${PTEST_PARALLEL_MAKEINST} "
18
19FILES:${PN}-ptest += "${PTEST_PATH}"
20SECTION:${PN}-ptest = "devel"
21ALLOW_EMPTY:${PN}-ptest = "1"
22PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
23PTEST_ENABLED:class-native = ""
24PTEST_ENABLED:class-nativesdk = ""
25PTEST_ENABLED:class-cross-canadian = ""
26RDEPENDS:${PN}-ptest += "${PN}"
27RDEPENDS:${PN}-ptest:class-native = ""
28RDEPENDS:${PN}-ptest:class-nativesdk = ""
29RRECOMMENDS:${PN}-ptest += "ptest-runner"
30
31PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
32
33require conf/distro/include/ptest-packagelists.inc
34
35do_configure_ptest() {
36 :
37}
38
39do_configure_ptest_base() {
40 do_configure_ptest
41}
42
43do_compile_ptest() {
44 :
45}
46
47do_compile_ptest_base() {
48 do_compile_ptest
49}
50
51do_install_ptest() {
52 :
53}
54
55do_install_ptest_base() {
56 if [ -f ${WORKDIR}/run-ptest ]; then
57 install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
58 fi
59 if grep -q install-ptest: Makefile; then
60 oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
61 fi
62 do_install_ptest
63 chown -R root:root ${D}${PTEST_PATH}
64
65 # Strip build host paths from any installed Makefile
66 for filename in ${PTEST_BUILD_HOST_FILES}; do
67 for installed_ptest_file in $(find ${D}${PTEST_PATH} -type f -name $filename); do
68 bbnote "Stripping host paths from: $installed_ptest_file"
69 sed -e 's#${HOSTTOOLS_DIR}/*##g' \
70 -e 's#${WORKDIR}/*=#.=#g' \
71 -e 's#${WORKDIR}/*##g' \
72 -i $installed_ptest_file
73 if [ -n "${PTEST_BUILD_HOST_PATTERN}" ]; then
74 sed -E '/${PTEST_BUILD_HOST_PATTERN}/d' \
75 -i $installed_ptest_file
76 fi
77 done
78 done
79}
80
81PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
82
83# This function needs to run after apply_update_alternative_renames because the
84# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
85# used here to make this function run as late as possible.
86PACKAGE_PREPROCESS_FUNCS:append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
87 bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
88
89python ptest_update_alternatives() {
90 """
91    This function generates symlinks in PTEST_BINDIR_PKGD_PATH to match
92    the binaries renamed by update-alternatives.
93 """
94
95 if not bb.data.inherits_class('update-alternatives', d) \
96 or not update_alternatives_enabled(d):
97 return
98
99 bb.note("Generating symlinks for ptest")
100 bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"),
101 d.getVar("sbindir"), d.getVar("base_sbindir") }
102 ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH")
103 os.mkdir(ptest_bindir)
104 for pkg in (d.getVar('PACKAGES') or "").split():
105 alternatives = update_alternatives_alt_targets(d, pkg)
106 for alt_name, alt_link, alt_target, _ in alternatives:
107 # Some alternatives are for man pages,
108 # check if the alternative is in PATH
109 if os.path.dirname(alt_link) in bin_paths:
110 os.symlink(alt_target, os.path.join(ptest_bindir, alt_name))
111}
112
113do_configure_ptest_base[dirs] = "${B}"
114do_compile_ptest_base[dirs] = "${B}"
115do_install_ptest_base[dirs] = "${B}"
116do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
117
118addtask configure_ptest_base after do_configure before do_compile
119addtask compile_ptest_base after do_compile before do_install
120addtask install_ptest_base after do_install before do_package do_populate_sysroot
121
122python () {
123 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
124 d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
125 d.setVarFlag('do_install_ptest_base', 'umask', '022')
126
127 # Remove all '*ptest_base' tasks when ptest is not enabled
128 if not(d.getVar('PTEST_ENABLED') == "1"):
129 for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
130 bb.build.deltask(i, d)
131}
132
133QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest"
134def package_qa_check_missing_ptest(pn, d, messages):
135    # This checks that the ptest package is actually included
136 # in standard oe-core ptest images - only for oe-core recipes
137 if not 'meta/recipes' in d.getVar('FILE') or not(d.getVar('PTEST_ENABLED') == "1"):
138 return
139
140 enabled_ptests = " ".join([d.getVar('PTESTS_FAST'), d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split()
141 if (pn + "-ptest").replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
142 oe.qa.handle_error("missing-ptest", "supports ptests but is not included in oe-core's ptest-packagelists.inc", d)
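
For illustration (not part of this change), a recipe opting in typically
inherits the class, ships a run-ptest script, and installs its tests under
${PTEST_PATH}. A hedged sketch, with the recipe fragment and test binary name
purely hypothetical:

    # hypothetical recipe fragment
    inherit ptest
    SRC_URI += "file://run-ptest"

    do_install_ptest() {
        # "tests/check" is an assumed test binary built in ${B}
        install -m 0755 ${B}/tests/check ${D}${PTEST_PATH}/
    }

The run-ptest script from ${WORKDIR} is installed automatically by
do_install_ptest_base() above; it is expected to emit "PASS: <name>" /
"FAIL: <name>" lines for ptest-runner to collect.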
diff --git a/meta/classes-recipe/pypi.bbclass b/meta/classes-recipe/pypi.bbclass
new file mode 100644
index 0000000000..aab04c638f
--- /dev/null
+++ b/meta/classes-recipe/pypi.bbclass
@@ -0,0 +1,34 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7def pypi_package(d):
8 bpn = d.getVar('BPN')
9 if bpn.startswith('python-'):
10 return bpn[7:]
11 elif bpn.startswith('python3-'):
12 return bpn[8:]
13 return bpn
14
15PYPI_PACKAGE ?= "${@pypi_package(d)}"
16PYPI_PACKAGE_EXT ?= "tar.gz"
17PYPI_ARCHIVE_NAME ?= "${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}"
18
19def pypi_src_uri(d):
20 package = d.getVar('PYPI_PACKAGE')
21 archive_name = d.getVar('PYPI_ARCHIVE_NAME')
22 return 'https://files.pythonhosted.org/packages/source/%s/%s/%s' % (package[0], package, archive_name)
23
24PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
25
26HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
27SECTION = "devel/python"
28SRC_URI:prepend = "${PYPI_SRC_URI} "
29S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
30
31UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
32UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
33
34CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
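
As a usage illustration (a hedged sketch; the recipe name and checksums are
hypothetical): a recipe named python3-foo only needs to inherit pypi plus a
build backend, and PYPI_PACKAGE then defaults to "foo":

    # hypothetical recipe: python3-foo_1.2.3.bb
    SUMMARY = "Example package from PyPI"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://LICENSE;md5=<md5sum>"

    inherit pypi python_setuptools_build_meta

    SRC_URI[sha256sum] = "<sha256 of foo-1.2.3.tar.gz>"

The tarball is fetched from files.pythonhosted.org via PYPI_SRC_URI and
unpacks into ${WORKDIR}/foo-1.2.3, matching the default S above.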
diff --git a/meta/classes-recipe/python3-dir.bbclass b/meta/classes-recipe/python3-dir.bbclass
new file mode 100644
index 0000000000..912c67253c
--- /dev/null
+++ b/meta/classes-recipe/python3-dir.bbclass
@@ -0,0 +1,11 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7PYTHON_BASEVERSION = "3.10"
8PYTHON_ABI = ""
9PYTHON_DIR = "python${PYTHON_BASEVERSION}"
10PYTHON_PN = "python3"
11PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes-recipe/python3native.bbclass b/meta/classes-recipe/python3native.bbclass
new file mode 100644
index 0000000000..654a002fdb
--- /dev/null
+++ b/meta/classes-recipe/python3native.bbclass
@@ -0,0 +1,30 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3-dir
8
9PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
10EXTRANATIVEPATH += "python3-native"
11DEPENDS:append = " python3-native "
12
13# python-config and other scripts are using sysconfig modules
14# which we patch to access these variables
15export STAGING_INCDIR
16export STAGING_LIBDIR
17
18# Packages can use
19# find_package(PythonInterp REQUIRED)
20# find_package(PythonLibs REQUIRED)
21# which ends up using libs/includes from the build host.
22# Therefore pre-empt that effort
23export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
24export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
25
26# suppress host user's site-packages dirs.
27export PYTHONNOUSERSITE = "1"
28
29# autoconf macros will use their internal default preference otherwise
30export PYTHON
diff --git a/meta/classes-recipe/python3targetconfig.bbclass b/meta/classes-recipe/python3targetconfig.bbclass
new file mode 100644
index 0000000000..3f89e5e09e
--- /dev/null
+++ b/meta/classes-recipe/python3targetconfig.bbclass
@@ -0,0 +1,35 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native
8
9EXTRA_PYTHON_DEPENDS ?= ""
10EXTRA_PYTHON_DEPENDS:class-target = "python3"
11DEPENDS:append = " ${EXTRA_PYTHON_DEPENDS}"
12
13do_configure:prepend:class-target() {
14 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
15}
16
17do_compile:prepend:class-target() {
18 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
19}
20
21do_install:prepend:class-target() {
22 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
23}
24
25do_configure:prepend:class-nativesdk() {
26 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
27}
28
29do_compile:prepend:class-nativesdk() {
30 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
31}
32
33do_install:prepend:class-nativesdk() {
34 export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
35}
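
A brief usage note (hedged): a recipe that builds extension modules against
the target Python outside the PEP-517 flow would add

    inherit python3targetconfig

the intent being that the exported _PYTHON_SYSCONFIGDATA_NAME makes the
interpreter load the target's staged sysconfigdata, rather than the native
configuration, during configure, compile and install.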
diff --git a/meta/classes-recipe/python_flit_core.bbclass b/meta/classes-recipe/python_flit_core.bbclass
new file mode 100644
index 0000000000..a0b1feb70a
--- /dev/null
+++ b/meta/classes-recipe/python_flit_core.bbclass
@@ -0,0 +1,14 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pep517 python3native python3-dir setuptools3-base
8
9DEPENDS += "python3 python3-flit-core-native"
10
11python_flit_core_do_manual_build () {
12 cd ${PEP517_SOURCE_PATH}
13 nativepython3 -m flit_core.wheel --outdir ${PEP517_WHEEL_PATH} .
14}
diff --git a/meta/classes-recipe/python_hatchling.bbclass b/meta/classes-recipe/python_hatchling.bbclass
new file mode 100644
index 0000000000..b9e6582eb5
--- /dev/null
+++ b/meta/classes-recipe/python_hatchling.bbclass
@@ -0,0 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pep517 python3native python3-dir setuptools3-base
8
9DEPENDS += "python3-hatchling-native"
diff --git a/meta/classes-recipe/python_pep517.bbclass b/meta/classes-recipe/python_pep517.bbclass
new file mode 100644
index 0000000000..202dde0bc3
--- /dev/null
+++ b/meta/classes-recipe/python_pep517.bbclass
@@ -0,0 +1,60 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Common infrastructure for Python packages that use PEP-517 compliant packaging.
8# https://www.python.org/dev/peps/pep-0517/
9#
10# This class will build a wheel in do_compile, and use pypa/installer to install
11# it in do_install.
12
13DEPENDS:append = " python3-picobuild-native python3-installer-native"
14
15# Where to execute the build process from
16PEP517_SOURCE_PATH ?= "${S}"
17
18# The directory where wheels will be written
19PEP517_WHEEL_PATH ?= "${WORKDIR}/dist"
20
21PEP517_PICOBUILD_OPTS ?= ""
22
23# The interpreter to use for installed scripts
24PEP517_INSTALL_PYTHON = "python3"
25PEP517_INSTALL_PYTHON:class-native = "nativepython3"
26
27# pypa/installer option to control the bytecode compilation
28INSTALL_WHEEL_COMPILE_BYTECODE ?= "--compile-bytecode=0"
29
30# PEP517 doesn't have a specific configure step, so set an empty do_configure to avoid
31# running base_do_configure.
32python_pep517_do_configure () {
33 :
34}
35
36# When we have Python 3.11 we can parse pyproject.toml to determine the build
37# API entry point directly
38python_pep517_do_compile () {
39 nativepython3 -m picobuild --source ${PEP517_SOURCE_PATH} --dest ${PEP517_WHEEL_PATH} --wheel ${PEP517_PICOBUILD_OPTS}
40}
41do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
42
43python_pep517_do_install () {
44 COUNT=$(find ${PEP517_WHEEL_PATH} -name '*.whl' | wc -l)
45 if test $COUNT -eq 0; then
46 bbfatal No wheels found in ${PEP517_WHEEL_PATH}
47 elif test $COUNT -gt 1; then
48 bbfatal More than one wheel found in ${PEP517_WHEEL_PATH}, this should not happen
49 fi
50
51 nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl
52}
53
54# A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native.
55python_pep517_do_bootstrap_install () {
56 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
57 unzip -d ${D}${PYTHON_SITEPACKAGES_DIR} ${PEP517_WHEEL_PATH}/*.whl
58}
59
60EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes-recipe/python_poetry_core.bbclass b/meta/classes-recipe/python_poetry_core.bbclass
new file mode 100644
index 0000000000..c7dc5d0382
--- /dev/null
+++ b/meta/classes-recipe/python_poetry_core.bbclass
@@ -0,0 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pep517 python3native setuptools3-base
8
9DEPENDS += "python3-poetry-core-native"
diff --git a/meta/classes-recipe/python_pyo3.bbclass b/meta/classes-recipe/python_pyo3.bbclass
new file mode 100644
index 0000000000..9a32eac6fd
--- /dev/null
+++ b/meta/classes-recipe/python_pyo3.bbclass
@@ -0,0 +1,36 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class helps make sure that Python extensions built with PyO3
9# and setuptools_rust properly set up the environment for cross compilation
10#
11
12inherit cargo python3-dir siteinfo
13
14export PYO3_CROSS="1"
15export PYO3_CROSS_PYTHON_VERSION="${PYTHON_BASEVERSION}"
16export PYO3_CROSS_LIB_DIR="${STAGING_LIBDIR}"
17export CARGO_BUILD_TARGET="${RUST_HOST_SYS}"
18export RUSTFLAGS
19export PYO3_PYTHON="${PYTHON}"
20export PYO3_CONFIG_FILE="${WORKDIR}/pyo3.config"
21
22python_pyo3_do_configure () {
23 cat > ${WORKDIR}/pyo3.config << EOF
24implementation=CPython
25version=${PYTHON_BASEVERSION}
26shared=true
27abi3=false
28lib_name=${PYTHON_DIR}
29lib_dir=${STAGING_LIBDIR}
30pointer_width=${SITEINFO_BITS}
31build_flags=WITH_THREAD
32suppress_build_script_link_lines=false
33EOF
34}
35
36EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes-recipe/python_setuptools3_rust.bbclass b/meta/classes-recipe/python_setuptools3_rust.bbclass
new file mode 100644
index 0000000000..d6ce2edb96
--- /dev/null
+++ b/meta/classes-recipe/python_setuptools3_rust.bbclass
@@ -0,0 +1,17 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python_pyo3 setuptools3
8
9DEPENDS += "python3-setuptools-rust-native"
10
11python_setuptools3_rust_do_configure() {
12 python_pyo3_do_configure
13 cargo_common_do_configure
14 setuptools3_do_configure
15}
16
17EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes-recipe/python_setuptools_build_meta.bbclass b/meta/classes-recipe/python_setuptools_build_meta.bbclass
new file mode 100644
index 0000000000..4c84d1e8d0
--- /dev/null
+++ b/meta/classes-recipe/python_setuptools_build_meta.bbclass
@@ -0,0 +1,9 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit setuptools3-base python_pep517
8
9DEPENDS += "python3-setuptools-native python3-wheel-native"
diff --git a/meta/classes-recipe/qemu.bbclass b/meta/classes-recipe/qemu.bbclass
new file mode 100644
index 0000000000..874b15127c
--- /dev/null
+++ b/meta/classes-recipe/qemu.bbclass
@@ -0,0 +1,77 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# This class contains functions for recipes that need QEMU or test for its
9# existence.
10#
11
12def qemu_target_binary(data):
13 package_arch = data.getVar("PACKAGE_ARCH")
14 qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch) or "")
15 if qemu_target_binary:
16 return qemu_target_binary
17
18 target_arch = data.getVar("TARGET_ARCH")
19 if target_arch in ("i486", "i586", "i686"):
20 target_arch = "i386"
21 elif target_arch == "powerpc":
22 target_arch = "ppc"
23 elif target_arch == "powerpc64":
24 target_arch = "ppc64"
25 elif target_arch == "powerpc64le":
26 target_arch = "ppc64le"
27
28 return "qemu-" + target_arch
29
30def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
31 import string
32
33 qemu_binary = qemu_target_binary(data)
34 if qemu_binary == "qemu-allarch":
35 qemu_binary = "qemuwrapper"
36
37 qemu_options = data.getVar("QEMU_OPTIONS")
38
39 return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
40 + " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
41
42# The next function returns a string containing the command needed to run a
43# certain binary through qemu. For example, if a postinstall scriptlet has to
44# run at do_rootfs time and the postinstall is architecture dependent, we can
45# run it through qemu. In that case, in the postinstall scriptlet, we could
46# use the following:
47#
48# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
49#
50def qemu_run_binary(data, rootfs_path, binary):
51 libdir = rootfs_path + data.getVar("libdir", False)
52 base_libdir = rootfs_path + data.getVar("base_libdir", False)
53
54 return qemu_wrapper_cmdline(data, rootfs_path, [libdir, base_libdir]) + rootfs_path + binary
55
56# QEMU_EXTRAOPTIONS is not meant to be used directly; the suffixes are
57# PACKAGE_ARCH values, *NOT* overrides.
58# In some cases (e.g. ppc) simply being arch specific (apparently) isn't good
59# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do
60# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
61# qemu-arm default CPU supports all required architecture levels.
62
63QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')) or ""}"
64QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
65
66QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
67QEMU_EXTRAOPTIONS_ppce500mc = " -cpu e500mc"
68QEMU_EXTRAOPTIONS_ppce5500 = " -cpu e500mc"
69QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
70QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
71QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
72QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
73QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER9"
74# Some packages, e.g. fwupd, set PACKAGE_ARCH = MACHINE_ARCH and use meson, which
75# needs the right options for usermode qemu
76QEMU_EXTRAOPTIONS_qemuppc = " -cpu 7400"
77QEMU_EXTRAOPTIONS_qemuppc64 = " -cpu POWER9"
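
As a usage illustration (a hedged sketch; the recipe fragment and binary name
are hypothetical): a recipe whose postinstall must execute a target binary at
do_rootfs time can combine this class with qemu_run_binary():

    # hypothetical recipe fragment
    inherit qemu

    pkg_postinst:${PN}() {
        if [ -n "$D" ]; then
            # rootfs-construction time: run the target binary under qemu
            ${@qemu_run_binary(d, '$D', '${bindir}/update-cache')} --refresh
        fi
    }

The "$D" guard limits the emulated invocation to image construction; on the
running target the same scriptlet could call the binary natively instead.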
diff --git a/meta/classes-recipe/qemuboot.bbclass b/meta/classes-recipe/qemuboot.bbclass
new file mode 100644
index 0000000000..018c000ca2
--- /dev/null
+++ b/meta/classes-recipe/qemuboot.bbclass
@@ -0,0 +1,171 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Help runqemu boot the target board. "QB" means Qemu Boot. The following
8# vars can be set in conf files, such as <bsp.conf>, so that the image can
9# be booted by runqemu:
10#
11# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
12#
13# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
14#
15# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
16#
17# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
18#
19# QB_MEM: memory, e.g., "-m 512"
20#
21# QB_MACHINE: qemu machine, e.g., "-machine virt"
22#
23# QB_CPU: qemu cpu, e.g., "-cpu qemu32"
24#
25# QB_CPU_KVM: similar to QB_CPU, but used when running with KVM, e.g., '-cpu kvm64';
26# set it when KVM is supported.
27#
28# QB_SMP: amount of CPU cores inside qemu guest, each mapped to a thread on the host,
29# e.g. "-smp 8".
30#
31# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
32# option, e.g., "console=ttyS0 console=tty"
33#
34# QB_DTB: qemu dtb name
35#
36# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa"; set it when audio support is needed
37#
38# QB_AUDIO_OPT: qemu audio option, e.g., "-device AC97", used
39# when QB_AUDIO_DRV is set.
40#
41# QB_RNG: Pass-through for the host random number generator; it can speed up boot
42# in system mode when the system is experiencing entropy starvation
43#
44# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
45# By default "/dev/vda rw" gets passed to the kernel.
46# To mount the rootfs read-only QB_KERNEL_ROOT can be set to e.g. "/dev/vda ro".
47#
48# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
49# it needs to work with QB_TAP_OPT and QB_SLIRP_OPT.
50# Note, runqemu will replace @MAC@ with a predefined mac, you can set
51# a custom one, but that may cause conflicts when multiple qemus are
52# running on the same host.
53# Note: If more than one interface of type -device virtio-net-device gets added,
54# QB_NETWORK_DEVICE:prepend might be used, since Qemu enumerates the eth*
55# devices in reverse order to -device arguments.
56#
57# QB_TAP_OPT: network option for 'tap' mode, e.g.,
58# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
59# Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ...
60#
61# QB_SLIRP_OPT: network option for SLIRP mode, e.g., "-netdev user,id=net0"
62#
63# QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the
64# ip= kernel command line argument needs to be changed accordingly. Details are documented
65# in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
66# Example to configure only the first interface: "ip=eth0:dhcp"
67# QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires
68# static IP configuration, the @CLIENT@ and @GATEWAY@ placeholders are replaced by the IP and the gateway
69# address of the qemu guest by runqemu.
70# Example: "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0"
71#
72# QB_ROOTFS_OPT: used as rootfs, e.g.,
73# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
74# Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4.
75#
76# QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio"
77#
78# QB_TCPSERIAL_OPT: tcp serial port option, e.g.,
79# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
80# Note, runqemu will replace "@PORT@" with the port number which is used.
81#
82# QB_ROOTFS_EXTRA_OPT: extra options to be appended to the rootfs device in case none are specified by QB_ROOTFS_OPT.
83# Can be used to automatically determine the image from the other variables
84# but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs
85# without the need to specify a dedicated qemu configuration
86#
87# QB_GRAPHICS: QEMU video card type (e.g. "-vga std")
88#
89# Usage:
90# IMAGE_CLASSES += "qemuboot"
91# See "runqemu help" for more info
92
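
As an illustration (hedged; the machine name and values are hypothetical,
loosely modeled on the qemu reference machines), a BSP conf might set:

    # hypothetical fragment of conf/machine/mymachine.conf
    QB_SYSTEM_NAME = "qemu-system-aarch64"
    QB_MACHINE = "-machine virt"
    QB_CPU = "-cpu cortex-a57"
    QB_MEM = "-m 512"
    QB_KERNEL_CMDLINE_APPEND = "console=ttyAMA0"

runqemu then picks these values up from the generated qemuboot.conf (see
do_write_qemuboot_conf below) rather than querying the build datastore.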
93QB_MEM ?= "-m 256"
94QB_SMP ?= ""
95QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
96QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
97QB_DEFAULT_FSTYPE ?= "ext4"
98QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
99QB_OPT_APPEND ?= ""
100QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
101QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
102QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
103QB_ROOTFS_EXTRA_OPT ?= ""
104QB_GRAPHICS ?= ""
105
106# This should be kept aligned with ROOT_VM
107QB_DRIVE_TYPE ?= "/dev/sd"
108
109inherit image-artifact-names
110
111# Create qemuboot.conf
112addtask do_write_qemuboot_conf after do_rootfs before do_image
113
114def qemuboot_vars(d):
115 build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
116 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
117 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
118 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER']
119 return build_vars + [k for k in d.keys() if k.startswith('QB_')]
120
121do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
122do_write_qemuboot_conf[vardepsexclude] += "TOPDIR"
123python do_write_qemuboot_conf() {
124 import configparser
125
126 qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
127 if d.getVar('IMAGE_LINK_NAME'):
128 qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
129 else:
130 qemuboot_link = ""
131 finalpath = d.getVar("DEPLOY_DIR_IMAGE")
132 topdir = d.getVar('TOPDIR')
133 cf = configparser.ConfigParser()
134 cf.add_section('config_bsp')
135 for k in sorted(qemuboot_vars(d)):
136 if ":" in k:
137 continue
138 # qemu-helper-native sysroot is not removed by rm_work and
139 # contains all tools required by runqemu
140 if k == 'STAGING_BINDIR_NATIVE':
141 val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'),
142 'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
143 else:
144 val = d.getVar(k)
145 if val is None:
146 continue
147 # we only want to write out relative paths so that we can relocate images
148 # and still run them
149 if val.startswith(topdir):
150 val = os.path.relpath(val, finalpath)
151 cf.set('config_bsp', k, '%s' % val)
152
153 # QB_DEFAULT_KERNEL's value of KERNEL_IMAGETYPE is the name of a symlink
154 # to the kernel file, which hinders relocatability of the qb conf.
155 # Read the link and replace it with the full filename of the target.
156 kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('QB_DEFAULT_KERNEL'))
157 kernel = os.path.realpath(kernel_link)
158 # we only want to write out relative paths so that we can relocate images
159 # and still run them
160 kernel = os.path.relpath(kernel, finalpath)
161 cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
162
163 bb.utils.mkdirhier(os.path.dirname(qemuboot))
164 with open(qemuboot, 'w') as f:
165 cf.write(f)
166
167 if qemuboot_link and qemuboot_link != qemuboot:
168 if os.path.lexists(qemuboot_link):
169 os.remove(qemuboot_link)
170 os.symlink(os.path.basename(qemuboot), qemuboot_link)
171}
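
For reference, the generated <IMAGE_NAME>.qemuboot.conf is a plain
ConfigParser file. A hedged, abbreviated example of its shape (keys are
lowercased by configparser; the values shown are illustrative):

    [config_bsp]
    deploy_dir_image = .
    qb_default_fstype = ext4
    qb_default_kernel = bzImage-<version>-yocto-standard.bin
    qb_mem = -m 256

deploy_dir_image becomes "." because paths under TOPDIR are rewritten
relative to DEPLOY_DIR_IMAGE, keeping the deployed images relocatable.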
diff --git a/meta/classes-recipe/rootfs-postcommands.bbclass b/meta/classes-recipe/rootfs-postcommands.bbclass
new file mode 100644
index 0000000000..d40adf5f0e
--- /dev/null
+++ b/meta/classes-recipe/rootfs-postcommands.bbclass
@@ -0,0 +1,440 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Zap the root password if debug-tweaks and empty-root-password features are not enabled
8ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
9
10# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
11ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
12
13# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled
14ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login; ", "",d)}'
15
16# Enable postinst logging if debug-tweaks or post-install-logging is enabled
17ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
18
19# Create /etc/timestamp during image construction to give a reasonably sane default time setting
20ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
21
22# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
23ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
24
25# We also need to do the same for the kernel boot parameters,
26# otherwise kernel or initramfs end up mounting the rootfs read/write
27# (the default) if supported by the underlying storage.
28#
29# We do this with :append because the default value might get set later with ?=
30# and we don't want to disable such a default by setting a value here.
31APPEND:append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
32
33# Generates test data file with data store variables expanded in json format
34ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
35
36# Write manifest
37IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
38ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
39# Set default postinst log file
40POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
41# Set default target for systemd images
42SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}'
43ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
44
45ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
46
47ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check; overlayfs_postprocess;", "", d)}'
48
49inherit image-artifact-names
50
51# Sort the user and group entries in /etc by ID in order to make the content
52# deterministic. Package installs are not deterministic, causing the ordering
53# of entries to change between builds. If this isn't desired,
54# the command can be overridden.
55#
56# Note that useradd-staticids.bbclass has to be used to ensure that
57# the numeric IDs of dynamically created entries remain stable.
58#
59# We want this to run as late as possible, in particular after
60# systemd_sysusers_create and set_user_group. Using :append is not
61# enough for that, as set_user_group is added that way and would end
62# up running after us.
63SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
64python () {
65 d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
66 d.appendVar('ROOTFS_POSTPROCESS_COMMAND', 'rootfs_reproducible;')
67}
68
69systemd_create_users () {
70 for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/*.conf; do
71 [ -e $conffile ] || continue
72 grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
73 if [ "$type" = "u" ]; then
74 useradd_params="--shell /sbin/nologin"
75 [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
76 [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
77 useradd_params="$useradd_params --system $name"
78 eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
79 elif [ "$type" = "g" ]; then
80 groupadd_params=""
81 [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
82 groupadd_params="$groupadd_params --system $name"
83 eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
84 elif [ "$type" = "m" ]; then
85 group=$id
86 eval groupadd --root ${IMAGE_ROOTFS} --system $group || true
87 eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name --no-user-group || true
88 eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
89 fi
90 done
91 done
92}
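# For illustration (a hypothetical sysusers.d entry): a line such as
#   u messagebus 101 "D-Bus"
# would result in roughly:
#   useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --uid 101 --comment "D-Bus" --system messagebus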
93
94#
95# A hook function to support read-only-rootfs IMAGE_FEATURES
96#
97read_only_rootfs_hook () {
98 # Tweak the mount option and fs_passno for rootfs in fstab
99 if [ -f ${IMAGE_ROOTFS}/etc/fstab ]; then
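		# For illustration (a hypothetical fstab line): the sed below turns
		#   /dev/root  /  auto  defaults  1  1
		# into
		#   /dev/root  /  auto  ro  1  0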
100 sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
101 fi
102
103 # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab
104 if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then
105 sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab
106 fi
107
108 # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
109 # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
110 # and the keys under /var/run/ssh.
111 if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
112 if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
113 echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
114 echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
115 else
116 echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
117 echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
118 fi
119 fi
120
121 # Also tweak the key location for dropbear in the same way.
122 if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
123 if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
124 echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
125 fi
126 fi
127
128 if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
129 # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
130 if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
131 sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
132 fi
133 # Run populate-volatile.sh at rootfs time to set up basic files
134 # and directories to support read-only rootfs.
135 if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
136 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
137 fi
138 fi
139
140 if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
141 # Create machine-id
142 # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
143 touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
144 fi
145}
146
147#
148# This function disallows empty root passwords
149#
150zap_empty_root_password () {
151 if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
152 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
153 fi
154 if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
155 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
156 fi
157}
158
159#
160# allow dropbear/openssh to accept logins from accounts with an empty password string
161#
162ssh_allow_empty_password () {
163 for config in sshd_config sshd_config_readonly; do
164 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
165 sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
166 fi
167 done
168
169 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
170 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
171 if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
172 sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
173 fi
174 else
175 printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
176 fi
177 fi
178
179 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
180 for f in `find ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* -type f -exec test -e {} \; -print`
181 do
182 sed -i 's/nullok_secure/nullok/' $f
183 done
184 fi
185}
186
187#
188# allow dropbear/openssh to accept root logins
189#
190ssh_allow_root_login () {
191 for config in sshd_config sshd_config_readonly; do
192 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
193 sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
194 fi
195 done
196
197 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
198 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
199 sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
200 fi
201 fi
202}
203
204python sort_passwd () {
205 import rootfspostcommands
206 rootfspostcommands.sort_passwd(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
207}
208
209#
210# Enable postinst logging
211#
212postinst_enable_logging () {
213 mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
214 echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
215 echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
216}
217
218#
219# Modify systemd default target
220#
221set_systemd_default_target () {
222 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ]; then
223 ln -sf ${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
224 fi
225}
226
227# If /var/volatile is not empty, we have seen problems where programs such as the
228# journal make assumptions based on the contents of /var/volatile. The journal
229# would then write to /var/volatile before it was mounted, thus hiding the
230# items previously written.
231#
232# This change attempts to fix those types of issues in a way that doesn't
233# affect users that may not be using /var/volatile.
234empty_var_volatile () {
235 if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
236 match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
237 if [ -n "$match" ]; then
238 find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
239 fi
240 fi
241}
242
243# Turn any symbolic /sbin/init link into a file
244remove_init_link () {
245 if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
246 LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
247 rm ${IMAGE_ROOTFS}/sbin/init
248 cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
249 fi
250}
251
252make_zimage_symlink_relative () {
253 if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
254 (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
255 fi
256}
257
258python write_image_manifest () {
259 from oe.rootfs import image_list_installed_packages
260 from oe.utils import format_pkg_list
261
262 deploy_dir = d.getVar('IMGDEPLOYDIR')
263 link_name = d.getVar('IMAGE_LINK_NAME')
264 manifest_name = d.getVar('IMAGE_MANIFEST')
265
266 if not manifest_name:
267 return
268
269 pkgs = image_list_installed_packages(d)
270 with open(manifest_name, 'w+') as image_manifest:
271 image_manifest.write(format_pkg_list(pkgs, "ver"))
272
273 if os.path.exists(manifest_name) and link_name:
274 manifest_link = deploy_dir + "/" + link_name + ".manifest"
275 if manifest_link != manifest_name:
276 if os.path.lexists(manifest_link):
277 os.remove(manifest_link)
278 os.symlink(os.path.basename(manifest_name), manifest_link)
279}
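# The manifest is a plain text package list, one per line, as produced by
# format_pkg_list(pkgs, "ver"); e.g. (illustrative): "busybox cortexa57 1.35.0-r0".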
280
281# Can be used to create /etc/timestamp during image construction to give a reasonably
282# sane default time setting
283rootfs_update_timestamp () {
284 if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
285 # Convert UTC into %4Y%2m%2d%2H%2M%2S
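		# e.g. (illustrative) REPRODUCIBLE_TIMESTAMP_ROOTFS=1640995200 yields 20220101000000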
286 sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
287 else
288 sformatted=`date -u +%4Y%2m%2d%2H%2M%2S`
289 fi
290 echo $sformatted > ${IMAGE_ROOTFS}/etc/timestamp
291 bbnote "rootfs_update_timestamp: set /etc/timestamp to $sformatted"
292}
293
294# Prevent X from being started
295rootfs_no_x_startup () {
296 if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
297 chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
298 fi
299}
300
301rootfs_trim_schemas () {
302 for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
303 do
304 # Need this in case no files exist
305 if [ -e $schema ]; then
306 oe-trim-schemas $schema > $schema.new
307 mv $schema.new $schema
308 fi
309 done
310}
311
312rootfs_check_host_user_contaminated () {
313 contaminated="${S}/host-user-contaminated.txt"
314 HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
315 HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
316
317 find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \
318 -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated"
319
320 sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do
321 bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line`
322 done
323
324 if [ -s "$contaminated" ]; then
325 bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd`
326 bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group`
327 fi
328}
329
330# Make any absolute links in a sysroot relative
331rootfs_sysroot_relativelinks () {
332 sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
333}
334
335# Generate the test data json file
336python write_image_test_data() {
337 from oe.data import export2json
338
339 deploy_dir = d.getVar('IMGDEPLOYDIR')
340 link_name = d.getVar('IMAGE_LINK_NAME')
341 testdata_name = os.path.join(deploy_dir, "%s.testdata.json" % d.getVar('IMAGE_NAME'))
342
343 searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
344 export2json(d, testdata_name, searchString=searchString, replaceString="")
345
346 if os.path.exists(testdata_name) and link_name:
347 testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
348 if testdata_link != testdata_name:
349 if os.path.lexists(testdata_link):
350 os.remove(testdata_link)
351 os.symlink(os.path.basename(testdata_name), testdata_link)
352}
353write_image_test_data[vardepsexclude] += "TOPDIR"
354
355# Check for unsatisfied recommendations (RRECOMMENDS)
356python rootfs_log_check_recommends() {
357 log_path = d.expand("${T}/log.do_rootfs")
358 with open(log_path, 'r') as log:
359 for line in log:
360 if 'log_check' in line:
361 continue
362
363 if 'unsatisfied recommendation for' in line:
364 bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line))
365}
366
367# Perform any additional adjustments needed to make the rootfs binary reproducible
368rootfs_reproducible () {
369 if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
370 # Convert UTC into %4Y%2m%2d%2H%2M%2S
371 sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
372 echo $sformatted > ${IMAGE_ROOTFS}/etc/version
373 bbnote "rootfs_reproducible: set /etc/version to $sformatted"
374
375 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
376 find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
377 sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
378 fi
379 fi
380}
381
382# Perform a dumb check for unit existence, not its validity
383python overlayfs_qa_check() {
384 from oe.overlayfs import mountUnitName
385
386 overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT") or {}
387 imagepath = d.getVar("IMAGE_ROOTFS")
388 sysconfdir = d.getVar("sysconfdir")
389 searchpaths = [oe.path.join(imagepath, sysconfdir, "systemd", "system"),
390 oe.path.join(imagepath, d.getVar("systemd_system_unitdir"))]
391 fstabpath = oe.path.join(imagepath, sysconfdir, "fstab")
392
393 if not any(os.path.exists(path) for path in [*searchpaths, fstabpath]):
394 return
395
396 fstabDevices = []
397 if os.path.isfile(fstabpath):
398 with open(fstabpath, 'r') as f:
399 for line in f:
400 if line[0] == '#':
401 continue
402 path = line.split(maxsplit=2)
403 if len(path) > 2:
404 fstabDevices.append(path[1])
405
406 allUnitExist = True
407 for mountPoint in overlayMountPoints:
408 qaSkip = (d.getVarFlag("OVERLAYFS_QA_SKIP", mountPoint) or "").split()
409 if "mount-configured" in qaSkip:
410 continue
411
412 mountPath = d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)
413 if mountPath in fstabDevices:
414 continue
415
416 mountUnit = mountUnitName(mountPath)
417 if any(os.path.isfile(oe.path.join(dirpath, mountUnit))
418 for dirpath in searchpaths):
419 continue
420
421 bb.warn(f'Mount path {mountPath} not found in fstab and unit '
422 f'{mountUnit} not found in systemd unit directories.')
423 bb.warn(f'Skip this check by setting OVERLAYFS_QA_SKIP[{mountPoint}] = '
424 '"mount-configured"')
425 allUnitExist = False
426
427 if not allUnitExist:
428 bb.fatal('Not all mount paths and units are installed in the image')
429}
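# As an illustration (assuming systemd's standard unit-name escaping): an
# OVERLAYFS_MOUNT_POINT of /mnt/overlay is satisfied either by an fstab entry
# for that path or by a mnt-overlay.mount unit in one of the searched unit
# directories.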
430
431python overlayfs_postprocess() {
432 import shutil
433
434 # install helper script
435 helperScriptName = "overlayfs-create-dirs.sh"
436 helperScriptSource = oe.path.join(d.getVar("COREBASE"), "meta/files", helperScriptName)
437 helperScriptDest = oe.path.join(d.getVar("IMAGE_ROOTFS"), "/usr/sbin/", helperScriptName)
438 shutil.copyfile(helperScriptSource, helperScriptDest)
439 os.chmod(helperScriptDest, 0o755)
440}
diff --git a/meta/classes-recipe/rootfs_deb.bbclass b/meta/classes-recipe/rootfs_deb.bbclass
new file mode 100644
index 0000000000..c5c6426abb
--- /dev/null
+++ b/meta/classes-recipe/rootfs_deb.bbclass
@@ -0,0 +1,41 @@
1#
2# Copyright 2006-2007 Openedhand Ltd.
3#
4# SPDX-License-Identifier: MIT
5#
6
7ROOTFS_PKGMANAGE = "dpkg apt"
8
9do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
10do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
11do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
12do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
13
14do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
15do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
16do_populate_sdk_ext[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
17
18python rootfs_deb_bad_recommendations() {
19 if d.getVar("BAD_RECOMMENDATIONS"):
20 bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
21}
22do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
23
24DEB_POSTPROCESS_COMMANDS = ""
25
26opkglibdir = "${localstatedir}/lib/opkg"
27
28python () {
29 # Map TARGET_ARCH to Debian's ideas about architectures
30 darch = d.getVar('SDK_ARCH')
31 if darch in ["x86", "i486", "i586", "i686", "pentium"]:
32 d.setVar('DEB_SDK_ARCH', 'i386')
33 elif darch == "x86_64":
34 d.setVar('DEB_SDK_ARCH', 'amd64')
35 elif darch == "arm":
36 d.setVar('DEB_SDK_ARCH', 'armel')
37 elif darch == "aarch64":
38 d.setVar('DEB_SDK_ARCH', 'arm64')
39 else:
40 bb.fatal("Unhandled SDK_ARCH %s" % darch)
41}
diff --git a/meta/classes-recipe/rootfs_ipk.bbclass b/meta/classes-recipe/rootfs_ipk.bbclass
new file mode 100644
index 0000000000..a48ad07dfc
--- /dev/null
+++ b/meta/classes-recipe/rootfs_ipk.bbclass
@@ -0,0 +1,44 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Creates a root filesystem out of IPKs
9#
10# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs etc.
11# See image.bbclass for a usage of this.
12#
13
14EXTRAOPKGCONFIG ?= ""
15ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
16
17do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
18do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
19do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
20do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
21
22do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
23do_populate_sdk[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
24do_populate_sdk_ext[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
25
26OPKG_PREPROCESS_COMMANDS = ""
27
28OPKG_POSTPROCESS_COMMANDS = ""
29
30OPKGLIBDIR ??= "${localstatedir}/lib"
31
32MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
33
34python () {
35
36 if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
37 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
38 flags = flags.replace("do_package_write_ipk", "")
39 flags = flags.replace("do_deploy", "")
40 flags = flags.replace("do_populate_sysroot", "")
41 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
42 d.setVar('OPKG_PREPROCESS_COMMANDS', "")
43 d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
44}
diff --git a/meta/classes-recipe/rootfs_rpm.bbclass b/meta/classes-recipe/rootfs_rpm.bbclass
new file mode 100644
index 0000000000..6eccd5a959
--- /dev/null
+++ b/meta/classes-recipe/rootfs_rpm.bbclass
@@ -0,0 +1,45 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Creates a root filesystem out of rpm packages
9#
10
11ROOTFS_PKGMANAGE = "rpm dnf"
12
13# dnf is using our custom sysconfig module, and so will fail without these
14export STAGING_INCDIR
15export STAGING_LIBDIR
16
17# Add 100MB of extra space for dnf
18IMAGE_ROOTFS_EXTRA_SPACE:append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
19
20# Dnf is python based, so be sure python3-native is available to us.
21EXTRANATIVEPATH += "python3-native"
22
23# opkg is needed for update-alternatives
24RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
25 dnf-native:do_populate_sysroot \
26 createrepo-c-native:do_populate_sysroot \
27 opkg-native:do_populate_sysroot"
28
29do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
30do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
31
32do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
33do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
34
35python () {
36 if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
37 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
38 flags = flags.replace("do_package_write_rpm", "")
39 flags = flags.replace("do_deploy", "")
40 flags = flags.replace("do_populate_sysroot", "")
41 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
42 d.setVar('RPM_PREPROCESS_COMMANDS', '')
43 d.setVar('RPM_POSTPROCESS_COMMANDS', '')
44
45}
diff --git a/meta/classes-recipe/rootfsdebugfiles.bbclass b/meta/classes-recipe/rootfsdebugfiles.bbclass
new file mode 100644
index 0000000000..cbcf876479
--- /dev/null
+++ b/meta/classes-recipe/rootfsdebugfiles.bbclass
@@ -0,0 +1,47 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class installs additional files found on the build host
8# directly into the rootfs.
9#
10# One use case is to install a constant ssh host key in
11# an image that gets created for just one machine. This
12# solves two issues:
13# - host key generation on the device can stall when the
14# kernel has not gathered enough entropy yet (seen in practice
15# under qemu)
16# - ssh complains by default when the host key changes
17#
18# For dropbear, with the ssh host key stored alongside the local.conf:
19# 1. Extend local.conf:
20# INHERIT += "rootfsdebugfiles"
21# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;"
22# 2. Boot the image once, copy the dropbear_rsa_host_key from
23# the device into your build conf directory.
24# 3. An optional parameter can be used to set the file mode
25# of the copied target, for instance:
26# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key 0600;"
27# in case the file needs a specific mode (it shouldn't be too open, for example).
28#
29# Do not use for production images! It bypasses several
30# core build mechanisms (updating the image when one
31# of the files changes, license tracking in the image
32# manifest, ...).
33
34ROOTFS_DEBUG_FILES ?= ""
35ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
36
37ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
38rootfs_debug_files () {
39 #!/bin/sh -e
40 echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
41 if [ -e "$source" ]; then
42 mkdir -p $(dirname $target)
43 cp -a $source $target
44 [ -n "$mode" ] && chmod $mode $target
45 fi
46 done
47}
diff --git a/meta/classes-recipe/rust-bin.bbclass b/meta/classes-recipe/rust-bin.bbclass
new file mode 100644
index 0000000000..b8e7ef8191
--- /dev/null
+++ b/meta/classes-recipe/rust-bin.bbclass
@@ -0,0 +1,154 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit rust
8
9RDEPENDS:${PN}:append:class-target = " ${RUSTLIB_DEP}"
10
11RUSTC_ARCHFLAGS += "-C opt-level=3 -g -L ${STAGING_DIR_HOST}/${rustlibdir} -C linker=${RUST_TARGET_CCLD}"
12EXTRA_OEMAKE += 'RUSTC_ARCHFLAGS="${RUSTC_ARCHFLAGS}"'
13
14# Some libraries share names with the standard library, but libstd is
15# configured to make it difficult or impossible to use its version.
16# Unfortunately libstd must be explicitly overridden using extern.
17OVERLAP_LIBS = "\
18 libc \
19 log \
20 getopts \
21 rand \
22"
23def get_overlap_deps(d):
24 deps = d.getVar("DEPENDS").split()
25 overlap_deps = []
26 for o in d.getVar("OVERLAP_LIBS").split():
27 l = len([o for dep in deps if (o + '-rs' in dep)])
28 if l > 0:
29 overlap_deps.append(o)
30 return " ".join(overlap_deps)
31OVERLAP_DEPS = "${@get_overlap_deps(d)}"
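# For example (illustrative): with "log-rs" in DEPENDS, OVERLAP_DEPS evaluates
# to "log" and get_overlap_externs below will pass "--extern log=<path>" to rustc.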
32
33# Prevents multiple static copies of standard library modules
34# See https://github.com/rust-lang/rust/issues/19680
35RUSTC_PREFER_DYNAMIC = "-C prefer-dynamic"
36RUSTC_FLAGS += "${RUSTC_PREFER_DYNAMIC}"
37
38CRATE_NAME ?= "${@d.getVar('BPN').replace('-rs', '').replace('-', '_')}"
39BINNAME ?= "${BPN}"
40LIBNAME ?= "lib${CRATE_NAME}-rs"
41CRATE_TYPE ?= "dylib"
42BIN_SRC ?= "${S}/src/main.rs"
43LIB_SRC ?= "${S}/src/lib.rs"
44
45rustbindest ?= "${bindir}"
46rustlibdest ?= "${rustlibdir}"
47RUST_RPATH_ABS ?= "${rustlibdir}:${rustlib}"
48
49def relative_rpaths(paths, base):
50 relpaths = set()
51 for p in paths.split(':'):
52 if p == base:
53 relpaths.add('$ORIGIN')
54 continue
55 relpaths.add(os.path.join('$ORIGIN', os.path.relpath(p, base)))
56 return '-rpath=' + ':'.join(relpaths) if len(relpaths) else ''
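# For example (illustrative):
#   relative_rpaths("/usr/lib/rustlib/t/lib:/usr/bin", "/usr/bin")
# returns "-rpath=$ORIGIN:$ORIGIN/../lib/rustlib/t/lib" (set iteration order
# may vary).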
57
58RUST_LIB_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustlibdest', True))}"
59RUST_BIN_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustbindest', True))}"
60
61def libfilename(d):
62 if d.getVar('CRATE_TYPE', True) == 'dylib':
63 return d.getVar('LIBNAME', True) + '.so'
64 else:
65 return d.getVar('LIBNAME', True) + '.rlib'
66
67def link_args(d, bin):
68 linkargs = []
69 if bin:
70 rpaths = d.getVar('RUST_BIN_RPATH_FLAGS', False)
71 else:
72 rpaths = d.getVar('RUST_LIB_RPATH_FLAGS', False)
73 if d.getVar('CRATE_TYPE', True) == 'dylib':
74 linkargs.append('-soname')
75 linkargs.append(libfilename(d))
76 if len(rpaths):
77 linkargs.append(rpaths)
78 if len(linkargs):
79 return ' '.join(['-Wl,' + arg for arg in linkargs])
80 else:
81 return ''
82
83get_overlap_externs () {
84 externs=
85 for dep in ${OVERLAP_DEPS}; do
86 extern=$(ls ${STAGING_DIR_HOST}/${rustlibdir}/lib$dep-rs.{so,rlib} 2>/dev/null \
87 | awk '{print $1}');
88 if [ -n "$extern" ]; then
89 externs="$externs --extern $dep=$extern"
90 else
91 echo "$dep in depends but no such library found in ${rustlibdir}!" >&2
92 exit 1
93 fi
94 done
95 echo "$externs"
96}
97
98do_configure () {
99}
100
101oe_runrustc () {
102 bbnote ${RUSTC} ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
103 "${RUSTC}" ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
104}
105
106oe_compile_rust_lib () {
107 rm -rf ${LIBNAME}.{rlib,so}
108 local -a link_args
109 if [ -n '${@link_args(d, False)}' ]; then
110 link_args[0]='-C'
111 link_args[1]='link-args=${@link_args(d, False)}'
112 fi
113 oe_runrustc $(get_overlap_externs) \
114 "${link_args[@]}" \
115 ${LIB_SRC} \
116 -o ${@libfilename(d)} \
117 --crate-name=${CRATE_NAME} --crate-type=${CRATE_TYPE} \
118 "$@"
119}
120oe_compile_rust_lib[vardeps] += "get_overlap_externs"
121
122oe_compile_rust_bin () {
123 rm -rf ${BINNAME}
124 local -a link_args
125 if [ -n '${@link_args(d, True)}' ]; then
126 link_args[0]='-C'
127 link_args[1]='link-args=${@link_args(d, True)}'
128 fi
129 oe_runrustc $(get_overlap_externs) \
130 "${link_args[@]}" \
131 ${BIN_SRC} -o ${BINNAME} "$@"
132}
133oe_compile_rust_bin[vardeps] += "get_overlap_externs"
134
135oe_install_rust_lib () {
136 for lib in $(ls ${LIBNAME}.{so,rlib} 2>/dev/null); do
137 echo Installing $lib
138 install -D -m 755 $lib ${D}/${rustlibdest}/$lib
139 done
140}
141
142oe_install_rust_bin () {
143 echo Installing ${BINNAME}
144 install -D -m 755 ${BINNAME} ${D}/${rustbindest}/${BINNAME}
145}
146
147do_rust_bin_fixups() {
148 for f in `find ${PKGD} -name '*.so*'`; do
149 echo "Strip rust note: $f"
150 ${OBJCOPY} -R .note.rustc $f $f
151 done
152}
153PACKAGE_PREPROCESS_FUNCS += "do_rust_bin_fixups"
154
diff --git a/meta/classes-recipe/rust-common.bbclass b/meta/classes-recipe/rust-common.bbclass
new file mode 100644
index 0000000000..93bf6c8be6
--- /dev/null
+++ b/meta/classes-recipe/rust-common.bbclass
@@ -0,0 +1,177 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native
8inherit rust-target-config
9
10# Common variables used by all Rust builds
11export rustlibdir = "${libdir}/rustlib/${RUST_HOST_SYS}/lib"
12FILES:${PN} += "${rustlibdir}/*.so"
13FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta"
14FILES:${PN}-dbg += "${rustlibdir}/.debug"
15
16RUSTLIB = "-L ${STAGING_DIR_HOST}${rustlibdir}"
17RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}"
18RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}"
19RUSTLIB_DEP ?= "libstd-rs"
20RUST_PANIC_STRATEGY ?= "unwind"
21
22def target_is_armv7(d):
23 '''Determine if target is armv7'''
24 # TUNE_FEATURES may include arm* even if the target is not arm
25 # in the case of *-native packages
26 if d.getVar('TARGET_ARCH') != 'arm':
27 return False
28
29 feat = d.getVar('TUNE_FEATURES')
30 feat = frozenset(feat.split())
31 mach_overrides = d.getVar('MACHINEOVERRIDES')
32 mach_overrides = frozenset(mach_overrides.split(':'))
33
34 v7=frozenset(['armv7a', 'armv7r', 'armv7m', 'armv7ve'])
35 if mach_overrides.isdisjoint(v7) and feat.isdisjoint(v7):
36 return False
37 else:
38 return True
39target_is_armv7[vardepvalue] = "${@target_is_armv7(d)}"
40
41# Responsible for taking Yocto triples and converting them to Rust triples
42def rust_base_triple(d, thing):
43 '''
44 Mangle bitbake's *_SYS into something that rust might support (see
45 rust/mk/cfg/* for a list)
46
47 Note that os is assumed to be some linux form
48 '''
49
50 # The llvm-target for armv7 is armv7-unknown-linux-gnueabihf
51 if d.getVar('{}_ARCH'.format(thing)) == d.getVar('TARGET_ARCH') and target_is_armv7(d):
52 arch = "armv7"
53 else:
54 arch = oe.rust.arch_to_rust_arch(d.getVar('{}_ARCH'.format(thing)))
55
56 # When bootstrapping rust-native, BUILD must be the same as upstream snapshot tarballs
57 bpn = d.getVar('BPN')
58 if thing == "BUILD" and bpn in ["rust"]:
59 return arch + "-unknown-linux-gnu"
60
61 vendor = d.getVar('{}_VENDOR'.format(thing))
62
63 # Default to glibc
64 libc = "-gnu"
65 os = d.getVar('{}_OS'.format(thing))
66 # This catches ARM targets and appends the necessary hard float bits
67 if os == "linux-gnueabi" or os == "linux-musleabi":
68 libc = bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hf', '', d)
69 elif "musl" in os:
70 libc = "-musl"
71 os = "linux"
72
73 return arch + vendor + '-' + os + libc
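
# For example (illustrative): TARGET_ARCH=arm with an armv7 hard-float tune and
# TARGET_VENDOR=-poky produces "armv7-poky-linux-gnueabihf".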
74
75
76# In some cases uname and the toolchain differ on their idea of the arch name
77RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}"
78
79# Naming explanation
80# Yocto
81# - BUILD_SYS - Yocto triple of the build environment
82# - HOST_SYS - What we're building for in Yocto
83# - TARGET_SYS - What we're building for in Yocto
84#
85# So when building '-native' packages BUILD_SYS == HOST_SYS == TARGET_SYS
86# When building packages for the image HOST_SYS == TARGET_SYS
87# This is a gross oversimplification as there are other modes, but
88# currently this is all that's supported.
89#
90# Rust
91# - TARGET - the system where the binary will run
92# - HOST - the system where the binary is being built
93#
94# Rust additionally will use two additional cases:
95# - undecorated (e.g. CC) - equivalent to TARGET
96# - triple suffix (e.g. CC:x86_64_unknown_linux_gnu) - both
97# see: https://github.com/alexcrichton/gcc-rs
98# Given the way that Rust's internal triples and Yocto triples are mapped together,
99# it's likely best not to use the triple suffix due to potential confusion.
100
101RUST_BUILD_SYS = "${@rust_base_triple(d, 'BUILD')}"
102RUST_BUILD_SYS[vardepvalue] = "${RUST_BUILD_SYS}"
103RUST_HOST_SYS = "${@rust_base_triple(d, 'HOST')}"
104RUST_HOST_SYS[vardepvalue] = "${RUST_HOST_SYS}"
105RUST_TARGET_SYS = "${@rust_base_triple(d, 'TARGET')}"
106RUST_TARGET_SYS[vardepvalue] = "${RUST_TARGET_SYS}"
107
108# Wrappers to get around the fact that Rust needs a single
109# binary but Yocto's compiler and linker commands have
110# arguments. Technically the archiver is always one command, but
111# wrapping it is still necessary for builds that determine the prefix
112# and then use those commands based on the prefix.
113WRAPPER_DIR = "${WORKDIR}/wrapper"
114RUST_BUILD_CC = "${WRAPPER_DIR}/build-rust-cc"
115RUST_BUILD_CXX = "${WRAPPER_DIR}/build-rust-cxx"
116RUST_BUILD_CCLD = "${WRAPPER_DIR}/build-rust-ccld"
117RUST_BUILD_AR = "${WRAPPER_DIR}/build-rust-ar"
118RUST_TARGET_CC = "${WRAPPER_DIR}/target-rust-cc"
119RUST_TARGET_CXX = "${WRAPPER_DIR}/target-rust-cxx"
120RUST_TARGET_CCLD = "${WRAPPER_DIR}/target-rust-ccld"
121RUST_TARGET_AR = "${WRAPPER_DIR}/target-rust-ar"
122
123create_wrapper_rust () {
124 file="$1"
125 shift
126 extras="$1"
127 shift
128
129 cat <<- EOF > "${file}"
130 #!/usr/bin/env python3
131 import os, sys
132 orig_binary = "$@"
133 extras = "${extras}"
134 binary = orig_binary.split()[0]
135 args = orig_binary.split() + sys.argv[1:]
136 if extras:
137 args.append(extras)
138 os.execvp(binary, args)
139 EOF
140 chmod +x "${file}"
141}
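
# For illustration (hypothetical values):
#   create_wrapper_rust "${RUST_TARGET_CC}" "" "arm-poky-linux-gnueabi-gcc" "--sysroot=/sysroot"
# generates a small Python script that execs
# "arm-poky-linux-gnueabi-gcc --sysroot=/sysroot" plus any arguments passed to the wrapper.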
142
143WRAPPER_TARGET_CC = "${CC}"
144WRAPPER_TARGET_CXX = "${CXX}"
145WRAPPER_TARGET_CCLD = "${CCLD}"
146WRAPPER_TARGET_LDFLAGS = "${LDFLAGS}"
147WRAPPER_TARGET_EXTRALD = ""
148WRAPPER_TARGET_AR = "${AR}"
149
150# compiler is used by gcc-rs
151# linker is used by rustc/cargo
152# archiver is used by the build of libstd-rs
153do_rust_create_wrappers () {
154 mkdir -p "${WRAPPER_DIR}"
155
156 # Yocto Build / Rust Host C compiler
157 create_wrapper_rust "${RUST_BUILD_CC}" "" "${BUILD_CC}"
158 # Yocto Build / Rust Host C++ compiler
159 create_wrapper_rust "${RUST_BUILD_CXX}" "" "${BUILD_CXX}"
160 # Yocto Build / Rust Host linker
161 create_wrapper_rust "${RUST_BUILD_CCLD}" "" "${BUILD_CCLD}" "${BUILD_LDFLAGS}"
162 # Yocto Build / Rust Host archiver
163 create_wrapper_rust "${RUST_BUILD_AR}" "" "${BUILD_AR}"
164
165 # Yocto Target / Rust Target C compiler
166 create_wrapper_rust "${RUST_TARGET_CC}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}"
167 # Yocto Target / Rust Target C++ compiler
168 create_wrapper_rust "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CXX}" "${CXXFLAGS}"
169 # Yocto Target / Rust Target linker
170 create_wrapper_rust "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_EXTRALD}" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}"
171 # Yocto Target / Rust Target archiver
172 create_wrapper_rust "${RUST_TARGET_AR}" "" "${WRAPPER_TARGET_AR}"
173
174}
175
176addtask rust_create_wrappers before do_configure after do_patch do_prepare_recipe_sysroot
177do_rust_create_wrappers[dirs] += "${WRAPPER_DIR}"
diff --git a/meta/classes-recipe/rust-target-config.bbclass b/meta/classes-recipe/rust-target-config.bbclass
new file mode 100644
index 0000000000..3405086402
--- /dev/null
+++ b/meta/classes-recipe/rust-target-config.bbclass
@@ -0,0 +1,391 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Right now this is focused on arm-specific tune features.
8# We get away with this for now as one can only use x86-64 as the build host
9# (not arm).
10# Note that TUNE_FEATURES _always_ refers to the target, so we really
11# don't want to use this for the host/build.
12def llvm_features_from_tune(d):
13 f = []
14 feat = d.getVar('TUNE_FEATURES')
15 if not feat:
16 return []
17 feat = frozenset(feat.split())
18
19 mach_overrides = d.getVar('MACHINEOVERRIDES')
20 mach_overrides = frozenset(mach_overrides.split(':'))
21
22 if 'vfpv4' in feat:
23 f.append("+vfp4")
24 if 'vfpv3' in feat:
25 f.append("+vfp3")
26 if 'vfpv3d16' in feat:
27 f.append("+d16")
28
29 if 'vfpv2' in feat or 'vfp' in feat:
30 f.append("+vfp2")
31
32 if 'neon' in feat:
33 f.append("+neon")
34
35 if 'mips32' in feat:
36 f.append("+mips32")
37
38 if 'mips32r2' in feat:
39 f.append("+mips32r2")
40
41 if target_is_armv7(d):
42 f.append('+v7')
43
44 if ('armv6' in mach_overrides) or ('armv6' in feat):
45 f.append("+v6")
46 if 'armv5te' in feat:
47 f.append("+strict-align")
48 f.append("+v5te")
49 elif 'armv5' in feat:
50 f.append("+strict-align")
51 f.append("+v5")
52
53 if ('armv4' in mach_overrides) or ('armv4' in feat):
54 f.append("+strict-align")
55
56 if 'dsp' in feat:
57 f.append("+dsp")
58
59 if 'thumb' in feat:
60 if d.getVar('ARM_THUMB_OPT') == "thumb":
61 if target_is_armv7(d):
62 f.append('+thumb2')
63 f.append("+thumb-mode")
64
65 if 'cortexa5' in feat:
66 f.append("+a5")
67 if 'cortexa7' in feat:
68 f.append("+a7")
69 if 'cortexa9' in feat:
70 f.append("+a9")
71 if 'cortexa15' in feat:
72 f.append("+a15")
73 if 'cortexa17' in feat:
74 f.append("+a17")
75 if ('riscv64' in feat) or ('riscv32' in feat):
76 f.append("+a,+c,+d,+f,+m")
77 return f
78llvm_features_from_tune[vardepvalue] = "${@llvm_features_from_tune(d)}"
79
80# TARGET_CC_ARCH changes from build/cross/target so it'll do the right thing
81# this should go away when https://github.com/rust-lang/rust/pull/31709 is
82# stable (1.9.0?)
83def llvm_features_from_cc_arch(d):
84 f = []
85 feat = d.getVar('TARGET_CC_ARCH')
86 if not feat:
87 return []
88 feat = frozenset(feat.split())
89
90 if '-mmmx' in feat:
91 f.append("+mmx")
92 if '-msse' in feat:
93 f.append("+sse")
94 if '-msse2' in feat:
95 f.append("+sse2")
96 if '-msse3' in feat:
97 f.append("+sse3")
98 if '-mssse3' in feat:
99 f.append("+ssse3")
100 if '-msse4.1' in feat:
101 f.append("+sse4.1")
102 if '-msse4.2' in feat:
103 f.append("+sse4.2")
104 if '-msse4a' in feat:
105 f.append("+sse4a")
106 if '-mavx' in feat:
107 f.append("+avx")
108 if '-mavx2' in feat:
109 f.append("+avx2")
110
111 return f
112
113def llvm_features_from_target_fpu(d):
114 # TARGET_FPU can be hard or soft. +soft-float tells llvm to use the soft float
115 # ABI. There is no option for hard.
116
117 fpu = d.getVar('TARGET_FPU', True)
118 return ["+soft-float"] if fpu == "soft" else []
119
120def llvm_features(d):
121 return ','.join(llvm_features_from_tune(d) +
122 llvm_features_from_cc_arch(d) +
123 llvm_features_from_target_fpu(d))
124
125llvm_features[vardepvalue] = "${@llvm_features(d)}"
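# For example (illustrative): a cortexa7 neon hard-float tune might yield a
# feature string like "+vfp4,+neon,+v7,+a7"; the exact value depends on
# TUNE_FEATURES, TARGET_CC_ARCH and TARGET_FPU.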
126
127## arm-unknown-linux-gnueabihf
128DATA_LAYOUT[arm-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
129TARGET_ENDIAN[arm-eabi] = "little"
130TARGET_POINTER_WIDTH[arm-eabi] = "32"
131TARGET_C_INT_WIDTH[arm-eabi] = "32"
132MAX_ATOMIC_WIDTH[arm-eabi] = "64"
133FEATURES[arm-eabi] = "+v6,+vfp2"
134
135## armv7-unknown-linux-gnueabihf
136DATA_LAYOUT[armv7-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
137TARGET_ENDIAN[armv7-eabi] = "little"
138TARGET_POINTER_WIDTH[armv7-eabi] = "32"
139TARGET_C_INT_WIDTH[armv7-eabi] = "32"
140MAX_ATOMIC_WIDTH[armv7-eabi] = "64"
141FEATURES[armv7-eabi] = "+v7,+vfp2,+thumb2"
142
143## aarch64-unknown-linux-{gnu, musl}
144DATA_LAYOUT[aarch64] = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
145TARGET_ENDIAN[aarch64] = "little"
146TARGET_POINTER_WIDTH[aarch64] = "64"
147TARGET_C_INT_WIDTH[aarch64] = "32"
148MAX_ATOMIC_WIDTH[aarch64] = "128"
149
150## x86_64-unknown-linux-{gnu, musl}
151DATA_LAYOUT[x86_64] = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
152TARGET_ENDIAN[x86_64] = "little"
153TARGET_POINTER_WIDTH[x86_64] = "64"
154TARGET_C_INT_WIDTH[x86_64] = "32"
155MAX_ATOMIC_WIDTH[x86_64] = "64"
156
157## x86_64-unknown-linux-gnux32
158DATA_LAYOUT[x86_64-x32] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
159TARGET_ENDIAN[x86_64-x32] = "little"
160TARGET_POINTER_WIDTH[x86_64-x32] = "32"
161TARGET_C_INT_WIDTH[x86_64-x32] = "32"
162MAX_ATOMIC_WIDTH[x86_64-x32] = "64"
163
164## i686-unknown-linux-{gnu, musl}
165DATA_LAYOUT[i686] = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
166TARGET_ENDIAN[i686] = "little"
167TARGET_POINTER_WIDTH[i686] = "32"
168TARGET_C_INT_WIDTH[i686] = "32"
169MAX_ATOMIC_WIDTH[i686] = "64"
170
171## XXX: a bit of a hack so qemux86 builds, clone of i686-unknown-linux-{gnu, musl} above
172DATA_LAYOUT[i586] = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
173TARGET_ENDIAN[i586] = "little"
174TARGET_POINTER_WIDTH[i586] = "32"
175TARGET_C_INT_WIDTH[i586] = "32"
176MAX_ATOMIC_WIDTH[i586] = "64"
177
178## mips-unknown-linux-{gnu, musl}
179DATA_LAYOUT[mips] = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
180TARGET_ENDIAN[mips] = "big"
181TARGET_POINTER_WIDTH[mips] = "32"
182TARGET_C_INT_WIDTH[mips] = "32"
183MAX_ATOMIC_WIDTH[mips] = "32"
184
185## mipsel-unknown-linux-{gnu, musl}
186DATA_LAYOUT[mipsel] = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
187TARGET_ENDIAN[mipsel] = "little"
188TARGET_POINTER_WIDTH[mipsel] = "32"
189TARGET_C_INT_WIDTH[mipsel] = "32"
190MAX_ATOMIC_WIDTH[mipsel] = "32"
191
192## mips64-unknown-linux-{gnu, musl}
193DATA_LAYOUT[mips64] = "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
194TARGET_ENDIAN[mips64] = "big"
195TARGET_POINTER_WIDTH[mips64] = "64"
196TARGET_C_INT_WIDTH[mips64] = "64"
197MAX_ATOMIC_WIDTH[mips64] = "64"
198
199## mips64-n32-unknown-linux-{gnu, musl}
200DATA_LAYOUT[mips64-n32] = "E-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128"
201TARGET_ENDIAN[mips64-n32] = "big"
202TARGET_POINTER_WIDTH[mips64-n32] = "32"
203TARGET_C_INT_WIDTH[mips64-n32] = "32"
204MAX_ATOMIC_WIDTH[mips64-n32] = "64"
205
206## mips64el-unknown-linux-{gnu, musl}
207DATA_LAYOUT[mips64el] = "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
208TARGET_ENDIAN[mips64el] = "little"
209TARGET_POINTER_WIDTH[mips64el] = "64"
210TARGET_C_INT_WIDTH[mips64el] = "64"
211MAX_ATOMIC_WIDTH[mips64el] = "64"
212
213## powerpc-unknown-linux-{gnu, musl}
214DATA_LAYOUT[powerpc] = "E-m:e-p:32:32-i64:64-n32"
215TARGET_ENDIAN[powerpc] = "big"
216TARGET_POINTER_WIDTH[powerpc] = "32"
217TARGET_C_INT_WIDTH[powerpc] = "32"
218MAX_ATOMIC_WIDTH[powerpc] = "32"
219
220## powerpc64-unknown-linux-{gnu, musl}
221DATA_LAYOUT[powerpc64] = "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512"
222TARGET_ENDIAN[powerpc64] = "big"
223TARGET_POINTER_WIDTH[powerpc64] = "64"
224TARGET_C_INT_WIDTH[powerpc64] = "64"
225MAX_ATOMIC_WIDTH[powerpc64] = "64"
226
227## powerpc64le-unknown-linux-{gnu, musl}
228DATA_LAYOUT[powerpc64le] = "e-m:e-i64:64-n32:64-v256:256:256-v512:512:512"
229TARGET_ENDIAN[powerpc64le] = "little"
230TARGET_POINTER_WIDTH[powerpc64le] = "64"
231TARGET_C_INT_WIDTH[powerpc64le] = "64"
232MAX_ATOMIC_WIDTH[powerpc64le] = "64"
233
234## riscv32-unknown-linux-{gnu, musl}
235DATA_LAYOUT[riscv32] = "e-m:e-p:32:32-i64:64-n32-S128"
236TARGET_ENDIAN[riscv32] = "little"
237TARGET_POINTER_WIDTH[riscv32] = "32"
238TARGET_C_INT_WIDTH[riscv32] = "32"
239MAX_ATOMIC_WIDTH[riscv32] = "32"
240
241## riscv64-unknown-linux-{gnu, musl}
242DATA_LAYOUT[riscv64] = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
243TARGET_ENDIAN[riscv64] = "little"
244TARGET_POINTER_WIDTH[riscv64] = "64"
245TARGET_C_INT_WIDTH[riscv64] = "64"
246MAX_ATOMIC_WIDTH[riscv64] = "64"
247
248# Convert a normal arch (HOST_ARCH, TARGET_ARCH, BUILD_ARCH, etc) to something
249# rust's internals won't choke on.
250def arch_to_rust_target_arch(arch):
251 if arch == "i586" or arch == "i686":
252 return "x86"
253 elif arch == "mipsel":
254 return "mips"
255 elif arch == "mips64el":
256 return "mips64"
257 elif arch == "armv7":
258 return "arm"
259 elif arch == "powerpc64le":
260 return "powerpc64"
261 else:
262 return arch
263
264# generates our target CPU value
265def llvm_cpu(d):
266 cpu = d.getVar('PACKAGE_ARCH')
267 target = d.getVar('TRANSLATED_TARGET_ARCH')
268
269 trans = {}
270 trans['corei7-64'] = "corei7"
271 trans['core2-32'] = "core2"
272 trans['x86-64'] = "x86-64"
273 trans['i686'] = "i686"
274 trans['i586'] = "i586"
275 trans['powerpc'] = "powerpc"
276 trans['mips64'] = "mips64"
277 trans['mips64el'] = "mips64"
278 trans['riscv64'] = "generic-rv64"
279 trans['riscv32'] = "generic-rv32"
280
281 if target in ["mips", "mipsel"]:
282 feat = frozenset(d.getVar('TUNE_FEATURES').split())
283 if "mips32r2" in feat:
284 trans['mipsel'] = "mips32r2"
285 trans['mips'] = "mips32r2"
286 elif "mips32" in feat:
287 trans['mipsel'] = "mips32"
288 trans['mips'] = "mips32"
289
290 try:
291 return trans[cpu]
292 except KeyError:
293 return trans.get(target, "generic")
294
295llvm_cpu[vardepvalue] = "${@llvm_cpu(d)}"
296
297def rust_gen_target(d, thing, wd, arch):
298 import json
299
300 build_sys = d.getVar('BUILD_SYS')
301 target_sys = d.getVar('TARGET_SYS')
302
303 sys = d.getVar('{}_SYS'.format(thing))
304 prefix = d.getVar('{}_PREFIX'.format(thing))
305 rustsys = d.getVar('RUST_{}_SYS'.format(thing))
306
307 abi = None
308 cpu = "generic"
309 features = ""
310
311 # Need to apply the target tuning consistently, but only if the triplet applies to the target
312 # and not in the native case
313 if sys == target_sys and sys != build_sys:
314 abi = d.getVar('ABIEXTENSION')
315 cpu = llvm_cpu(d)
316 if bb.data.inherits_class('native', d):
317 features = ','.join(llvm_features_from_cc_arch(d))
318 else:
319 features = llvm_features(d) or ""
320 # arm and armv7 have different targets in llvm
321 if arch == "arm" and target_is_armv7(d):
322 arch = 'armv7'
323
324 rust_arch = oe.rust.arch_to_rust_arch(arch)
325
326 if abi:
327 arch_abi = "{}-{}".format(rust_arch, abi)
328 else:
329 arch_abi = rust_arch
330
331 features = features or d.getVarFlag('FEATURES', arch_abi) or ""
332 features = features.strip()
333
334 # build tspec
335 tspec = {}
336 tspec['llvm-target'] = rustsys
337 tspec['data-layout'] = d.getVarFlag('DATA_LAYOUT', arch_abi)
338 if tspec['data-layout'] is None:
339 bb.fatal("No rust target defined for %s" % arch_abi)
340 tspec['max-atomic-width'] = int(d.getVarFlag('MAX_ATOMIC_WIDTH', arch_abi))
341 tspec['target-pointer-width'] = d.getVarFlag('TARGET_POINTER_WIDTH', arch_abi)
342 tspec['target-c-int-width'] = d.getVarFlag('TARGET_C_INT_WIDTH', arch_abi)
343 tspec['target-endian'] = d.getVarFlag('TARGET_ENDIAN', arch_abi)
344 tspec['arch'] = arch_to_rust_target_arch(rust_arch)
345 tspec['os'] = "linux"
346 if "musl" in tspec['llvm-target']:
347 tspec['env'] = "musl"
348 else:
349 tspec['env'] = "gnu"
350 if "riscv64" in tspec['llvm-target']:
351 tspec['llvm-abiname'] = "lp64d"
352 if "riscv32" in tspec['llvm-target']:
353 tspec['llvm-abiname'] = "ilp32d"
354 tspec['vendor'] = "unknown"
355 tspec['target-family'] = "unix"
356 tspec['linker'] = "{}{}gcc".format(d.getVar('CCACHE'), prefix)
357 tspec['cpu'] = cpu
358 if features != "":
359 tspec['features'] = features
360 tspec['dynamic-linking'] = True
361 tspec['executables'] = True
362 tspec['linker-is-gnu'] = True
363 tspec['linker-flavor'] = "gcc"
364 tspec['has-rpath'] = True
365 tspec['has-elf-tls'] = True
366 tspec['position-independent-executables'] = True
367 tspec['panic-strategy'] = d.getVar("RUST_PANIC_STRATEGY")
368
369 # write out the target spec json file
370 with open(wd + rustsys + '.json', 'w') as f:
371 json.dump(tspec, f, indent=4)
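
# For illustration (abridged, hypothetical values), the generated <triple>.json
# that rustc consumes via RUST_TARGET_PATH looks like:
#   { "llvm-target": "aarch64-poky-linux", "arch": "aarch64",
#     "target-pointer-width": "64", "panic-strategy": "unwind", ... }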
372
373# These are accounted for in tmpdir path names so don't need to be in the task sig
374rust_gen_target[vardepsexclude] += "ABIEXTENSION llvm_cpu"
375
376do_rust_gen_targets[vardeps] += "DATA_LAYOUT TARGET_ENDIAN TARGET_POINTER_WIDTH TARGET_C_INT_WIDTH MAX_ATOMIC_WIDTH FEATURES"
377
378RUST_TARGETS_DIR = "${WORKDIR}/rust-targets/"
379export RUST_TARGET_PATH = "${RUST_TARGETS_DIR}"
380
381python do_rust_gen_targets () {
382 wd = d.getVar('RUST_TARGETS_DIR')
383 # The order of BUILD, HOST, TARGET matters in case the files overwrite each other; most specific last
384 rust_gen_target(d, 'BUILD', wd, d.getVar('BUILD_ARCH'))
385 rust_gen_target(d, 'HOST', wd, d.getVar('HOST_ARCH'))
386 rust_gen_target(d, 'TARGET', wd, d.getVar('TARGET_ARCH'))
387}
388
389addtask rust_gen_targets after do_patch before do_compile
390do_rust_gen_targets[dirs] += "${RUST_TARGETS_DIR}"
391
diff --git a/meta/classes-recipe/rust.bbclass b/meta/classes-recipe/rust.bbclass
new file mode 100644
index 0000000000..dae25cac2a
--- /dev/null
+++ b/meta/classes-recipe/rust.bbclass
@@ -0,0 +1,51 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit rust-common
8
9RUSTC = "rustc"
10
11RUSTC_ARCHFLAGS += "--target=${RUST_HOST_SYS} ${RUSTFLAGS}"
12
13def rust_base_dep(d):
14 # Taken from meta/classes/base.bbclass `base_dep_prepend` and modified to
15 # use rust instead of gcc
16 deps = ""
17 if not d.getVar('INHIBIT_DEFAULT_RUST_DEPS'):
18 if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
19 deps += " rust-native ${RUSTLIB_DEP}"
20 else:
21 deps += " rust-native"
22 return deps
23
24DEPENDS:append = " ${@rust_base_dep(d)}"
25
26# BUILD_LDFLAGS
27# ${STAGING_LIBDIR_NATIVE}
28# ${STAGING_BASE_LIBDIR_NATIVE}
29# BUILDSDK_LDFLAGS
30# ${STAGING_LIBDIR}
31# #{STAGING_DIR_HOST}
32# TARGET_LDFLAGS ?????
33#RUSTC_BUILD_LDFLAGS = "\
34# --sysroot ${STAGING_DIR_NATIVE} \
35# -L${STAGING_LIBDIR_NATIVE} \
36# -L${STAGING_BASE_LIBDIR_NATIVE} \
37#"
38
39# XXX: for some reason bitbake sets BUILD_* & TARGET_* but uses the bare
40# variables for HOST. Alias things to make it easier for us.
41HOST_LDFLAGS ?= "${LDFLAGS}"
42HOST_CFLAGS ?= "${CFLAGS}"
43HOST_CXXFLAGS ?= "${CXXFLAGS}"
44HOST_CPPFLAGS ?= "${CPPFLAGS}"
45
46rustlib_suffix="${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${RUST_HOST_SYS}/lib"
47# Native sysroot standard library path
48rustlib_src="${prefix}/lib/${rustlib_suffix}"
49# Host sysroot standard library path
50rustlib="${libdir}/${rustlib_suffix}"
51rustlib:class-native="${libdir}/rustlib/${BUILD_SYS}/lib"
diff --git a/meta/classes-recipe/scons.bbclass b/meta/classes-recipe/scons.bbclass
new file mode 100644
index 0000000000..5f0d4a910b
--- /dev/null
+++ b/meta/classes-recipe/scons.bbclass
@@ -0,0 +1,34 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit python3native
8
9DEPENDS += "python3-scons-native"
10
11EXTRA_OESCONS ?= ""
12
13do_configure() {
14 if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
15 if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
16 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
17 fi
18
19 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
20 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
21 fi
22}
23
24scons_do_compile() {
25 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
26 die "scons build execution failed."
27}
28
29scons_do_install() {
30 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
31 die "scons install execution failed."
32}
33
34EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes-recipe/setuptools3-base.bbclass b/meta/classes-recipe/setuptools3-base.bbclass
new file mode 100644
index 0000000000..21b688ced0
--- /dev/null
+++ b/meta/classes-recipe/setuptools3-base.bbclass
@@ -0,0 +1,37 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS:append:class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
8DEPENDS:append:class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
9RDEPENDS:${PN}:append:class-target = " ${PYTHON_PN}-core"
10
11export STAGING_INCDIR
12export STAGING_LIBDIR
13
14# LDSHARED is the ld *command* used to create a shared library
15export LDSHARED = "${CCLD} -shared"
16# LDCXXSHARED is the ld *command* used to create a shared library of C++
17# objects
18export LDCXXSHARED = "${CXX} -shared"
19# CCSHARED are the C *flags* used to create objects to go into a shared
20# library (module)
21export CCSHARED = "-fPIC -DPIC"
22# LINKFORSHARED are the flags passed to the $(CC) command that links
23# the python executable
24export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
25
26FILES:${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
27
28FILES:${PN}-staticdev += "\
29 ${PYTHON_SITEPACKAGES_DIR}/*.a \
30"
31FILES:${PN}-dev += "\
32 ${datadir}/pkgconfig \
33 ${libdir}/pkgconfig \
34 ${PYTHON_SITEPACKAGES_DIR}/*.la \
35"
36inherit python3native python3targetconfig
37
diff --git a/meta/classes-recipe/setuptools3.bbclass b/meta/classes-recipe/setuptools3.bbclass
new file mode 100644
index 0000000000..4c6e79ee9a
--- /dev/null
+++ b/meta/classes-recipe/setuptools3.bbclass
@@ -0,0 +1,38 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit setuptools3-base python_pep517
8
9DEPENDS += "python3-setuptools-native python3-wheel-native"
10
11SETUPTOOLS_BUILD_ARGS ?= ""
12
13SETUPTOOLS_SETUP_PATH ?= "${S}"
14
15setuptools3_do_configure() {
16 :
17}
18
19setuptools3_do_compile() {
20 cd ${SETUPTOOLS_SETUP_PATH}
21 NO_FETCH_BUILD=1 \
22 STAGING_INCDIR=${STAGING_INCDIR} \
23 STAGING_LIBDIR=${STAGING_LIBDIR} \
24 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
25 bdist_wheel --verbose --dist-dir ${PEP517_WHEEL_PATH} ${SETUPTOOLS_BUILD_ARGS} || \
26 bbfatal_log "'${PYTHON_PN} setup.py bdist_wheel ${SETUPTOOLS_BUILD_ARGS}' execution failed."
27}
28setuptools3_do_compile[vardepsexclude] = "MACHINE"
29do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
30
31# This could be removed in the future but some recipes in meta-oe still use it
32setuptools3_do_install() {
33 python_pep517_do_install
34}
35
36EXPORT_FUNCTIONS do_configure do_compile do_install
37
38export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes-recipe/setuptools3_legacy.bbclass b/meta/classes-recipe/setuptools3_legacy.bbclass
new file mode 100644
index 0000000000..21748f922a
--- /dev/null
+++ b/meta/classes-recipe/setuptools3_legacy.bbclass
@@ -0,0 +1,84 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is for packages which use the deprecated setuptools behaviour,
8# specifically custom install tasks which don't work correctly with bdist_wheel.
9# This behaviour is deprecated in setuptools[1] and won't work in the future, so
10# all users of this should consider their options: pure Python modules can use a
11# modern Python tool such as build[2], or packages which are doing more (such as
12# installing init scripts) should use a fully-featured build system such as Meson.
13#
14# [1] https://setuptools.pypa.io/en/latest/history.html#id142
15# [2] https://pypi.org/project/build/
16
17inherit setuptools3-base
18
19B = "${WORKDIR}/build"
20
21SETUPTOOLS_BUILD_ARGS ?= ""
22SETUPTOOLS_INSTALL_ARGS ?= "--root=${D} \
23 --prefix=${prefix} \
24 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
25 --install-data=${datadir}"
26
27SETUPTOOLS_PYTHON = "python3"
28SETUPTOOLS_PYTHON:class-native = "nativepython3"
29
30SETUPTOOLS_SETUP_PATH ?= "${S}"
31
32setuptools3_legacy_do_configure() {
33 :
34}
35
36setuptools3_legacy_do_compile() {
37 cd ${SETUPTOOLS_SETUP_PATH}
38 NO_FETCH_BUILD=1 \
39 STAGING_INCDIR=${STAGING_INCDIR} \
40 STAGING_LIBDIR=${STAGING_LIBDIR} \
41 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
42 build --build-base=${B} ${SETUPTOOLS_BUILD_ARGS} || \
43 bbfatal_log "'${PYTHON_PN} setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed."
44}
45setuptools3_legacy_do_compile[vardepsexclude] = "MACHINE"
46
47setuptools3_legacy_do_install() {
48 cd ${SETUPTOOLS_SETUP_PATH}
49 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
50 STAGING_INCDIR=${STAGING_INCDIR} \
51 STAGING_LIBDIR=${STAGING_LIBDIR} \
52 PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
53 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
54 build --build-base=${B} install --skip-build ${SETUPTOOLS_INSTALL_ARGS} || \
55 bbfatal_log "'${PYTHON_PN} setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed."
56
57 # support filenames with *spaces*
58 find ${D} -name "*.py" -exec grep -q ${D} {} \; \
59 -exec sed -i -e s:${D}::g {} \;
60
61 for i in ${D}${bindir}/* ${D}${sbindir}/*; do
62 if [ -f "$i" ]; then
63 sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${SETUPTOOLS_PYTHON}:g $i
64 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
65 fi
66 done
67
68 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
69
70 #
71 # FIXME: Bandaid against wrong datadir computation
72 #
73 if [ -e ${D}${datadir}/share ]; then
74 mv -f ${D}${datadir}/share/* ${D}${datadir}/
75 rmdir ${D}${datadir}/share
76 fi
77}
78setuptools3_legacy_do_install[vardepsexclude] = "MACHINE"
79
80EXPORT_FUNCTIONS do_configure do_compile do_install
81
82export LDSHARED="${CCLD} -shared"
83DEPENDS += "python3-setuptools-native"
84
diff --git a/meta/classes-recipe/siteinfo.bbclass b/meta/classes-recipe/siteinfo.bbclass
new file mode 100644
index 0000000000..d31c9b2571
--- /dev/null
+++ b/meta/classes-recipe/siteinfo.bbclass
@@ -0,0 +1,232 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class exists to provide information about the targets that
8# may be needed by other classes and/or recipes. If you add a new
9# target this will probably need to be updated.
10
11#
12# Returns information about 'what' for the named target 'target'
13# where 'target' == "<arch>-<os>"
14#
15# 'what' can be one of
16# * target: Returns the target name ("<arch>-<os>")
17# * endianness: Return "be" for big endian targets, "le" for little endian
18# * bits: Returns the bit size of the target, either "32" or "64"
19# * libc: Returns the name of the c library used by the target
20#
21# It is an error for the target not to exist.
22# If 'what' doesn't exist then an empty value is returned
23#
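# For example (illustrative): for an "x86_64-linux" target, the tables below
# combine to endianness "le", bits "64" and libc "glibc".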
24def siteinfo_data_for_machine(arch, os, d):
25 archinfo = {
26 "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
27 "aarch64": "endian-little bit-64 arm-common arm-64",
28 "aarch64_be": "endian-big bit-64 arm-common arm-64",
29 "arc": "endian-little bit-32 arc-common",
30 "arceb": "endian-big bit-32 arc-common",
31 "arm": "endian-little bit-32 arm-common arm-32",
32 "armeb": "endian-big bit-32 arm-common arm-32",
33 "avr32": "endian-big bit-32 avr32-common",
34 "bfin": "endian-little bit-32 bfin-common",
35 "epiphany": "endian-little bit-32",
36 "i386": "endian-little bit-32 ix86-common",
37 "i486": "endian-little bit-32 ix86-common",
38 "i586": "endian-little bit-32 ix86-common",
39 "i686": "endian-little bit-32 ix86-common",
40 "ia64": "endian-little bit-64",
41 "lm32": "endian-big bit-32",
42 "m68k": "endian-big bit-32",
43 "microblaze": "endian-big bit-32 microblaze-common",
44 "microblazeel": "endian-little bit-32 microblaze-common",
45 "mips": "endian-big bit-32 mips-common",
46 "mips64": "endian-big bit-64 mips-common",
47 "mips64el": "endian-little bit-64 mips-common",
48 "mipsisa64r6": "endian-big bit-64 mips-common",
49 "mipsisa64r6el": "endian-little bit-64 mips-common",
50 "mipsel": "endian-little bit-32 mips-common",
51 "mipsisa32r6": "endian-big bit-32 mips-common",
52 "mipsisa32r6el": "endian-little bit-32 mips-common",
53 "powerpc": "endian-big bit-32 powerpc-common",
54 "powerpcle": "endian-little bit-32 powerpc-common",
55 "nios2": "endian-little bit-32 nios2-common",
56 "powerpc64": "endian-big bit-64 powerpc-common",
57 "powerpc64le": "endian-little bit-64 powerpc-common",
58 "ppc": "endian-big bit-32 powerpc-common",
59 "ppc64": "endian-big bit-64 powerpc-common",
60        "ppc64le": "endian-little bit-64 powerpc-common",
61 "riscv32": "endian-little bit-32 riscv-common",
62 "riscv64": "endian-little bit-64 riscv-common",
63 "sh3": "endian-little bit-32 sh-common",
64 "sh3eb": "endian-big bit-32 sh-common",
65 "sh4": "endian-little bit-32 sh-common",
66 "sh4eb": "endian-big bit-32 sh-common",
67 "sparc": "endian-big bit-32",
68 "viac3": "endian-little bit-32 ix86-common",
69 "x86_64": "endian-little", # bitinfo specified in targetinfo
70 }
71 osinfo = {
72 "darwin": "common-darwin",
73 "darwin9": "common-darwin",
74 "linux": "common-linux common-glibc",
75 "linux-gnu": "common-linux common-glibc",
76 "linux-gnu_ilp32": "common-linux common-glibc",
77 "linux-gnux32": "common-linux common-glibc",
78 "linux-gnun32": "common-linux common-glibc",
79 "linux-gnueabi": "common-linux common-glibc",
80 "linux-gnuspe": "common-linux common-glibc",
81 "linux-musl": "common-linux common-musl",
82 "linux-muslx32": "common-linux common-musl",
83 "linux-musleabi": "common-linux common-musl",
84 "linux-muslspe": "common-linux common-musl",
85 "uclinux-uclibc": "common-uclibc",
86 "cygwin": "common-cygwin",
87 "mingw32": "common-mingw",
88 }
89 targetinfo = {
90 "aarch64-linux-gnu": "aarch64-linux",
91 "aarch64_be-linux-gnu": "aarch64_be-linux",
92        "aarch64-linux-gnu_ilp32": "bit-32 aarch64-linux arm-32",
93 "aarch64_be-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
94 "aarch64-linux-musl": "aarch64-linux",
95 "aarch64_be-linux-musl": "aarch64_be-linux",
96 "arm-linux-gnueabi": "arm-linux",
97 "arm-linux-musleabi": "arm-linux",
98 "armeb-linux-gnueabi": "armeb-linux",
99 "armeb-linux-musleabi": "armeb-linux",
100        "microblazeel-linux": "microblaze-linux",
101        "microblazeel-linux-musl": "microblaze-linux",
102 "mips-linux-musl": "mips-linux",
103 "mipsel-linux-musl": "mipsel-linux",
104 "mips64-linux-musl": "mips64-linux",
105 "mips64el-linux-musl": "mips64el-linux",
106 "mips64-linux-gnun32": "mips-linux bit-32",
107 "mips64el-linux-gnun32": "mipsel-linux bit-32",
108 "mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
109 "mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
110 "powerpc-linux": "powerpc32-linux powerpc32-linux-glibc",
111 "powerpc-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
112 "powerpcle-linux": "powerpc32-linux powerpc32-linux-glibc",
113 "powerpcle-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
114 "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux powerpc32-linux-glibc",
115 "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
116 "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
117 "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
118 "powerpc64-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
119 "powerpc64-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
120 "powerpc64le-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
121 "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
122 "riscv32-linux": "riscv32-linux",
123 "riscv32-linux-musl": "riscv32-linux",
124 "riscv64-linux": "riscv64-linux",
125 "riscv64-linux-musl": "riscv64-linux",
126 "x86_64-cygwin": "bit-64",
127 "x86_64-darwin": "bit-64",
128 "x86_64-darwin9": "bit-64",
129 "x86_64-linux": "bit-64",
130 "x86_64-linux-musl": "x86_64-linux bit-64",
131 "x86_64-linux-muslx32": "bit-32 ix86-common x32-linux",
132 "x86_64-elf": "bit-64",
133 "x86_64-linux-gnu": "bit-64 x86_64-linux",
134 "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
135 "x86_64-mingw32": "bit-64",
136 }
137
138 # Add in any extra user supplied data which may come from a BSP layer, removing the
139 # need to always change this class directly
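    # For instance, a BSP layer could hook in like this (hypothetical
    # function name):
    #   SITEINFO_EXTRA_DATAFUNCS = "mybsp_siteinfo"
    #   def mybsp_siteinfo(archinfo, osinfo, targetinfo, d):
    #       archinfo["myarch"] = "endian-little bit-32"
    #       return archinfo, osinfo, targetinfo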
140 extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS") or "").split()
141 for m in extra_siteinfo:
142 call = m + "(archinfo, osinfo, targetinfo, d)"
143 locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
144 archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
145
146 target = "%s-%s" % (arch, os)
147
148 sitedata = []
149 if arch in archinfo:
150 sitedata.extend(archinfo[arch].split())
151 if os in osinfo:
152 sitedata.extend(osinfo[os].split())
153 if target in targetinfo:
154 sitedata.extend(targetinfo[target].split())
155 sitedata.append(target)
156 sitedata.append("common")
157
158    bb.debug(1, "SITE files %s" % sitedata)
159 return sitedata
160
161def siteinfo_data(d):
162 return siteinfo_data_for_machine(d.getVar("HOST_ARCH"), d.getVar("HOST_OS"), d)
163
164python () {
165 sitedata = set(siteinfo_data(d))
166 if "endian-little" in sitedata:
167 d.setVar("SITEINFO_ENDIANNESS", "le")
168 elif "endian-big" in sitedata:
169 d.setVar("SITEINFO_ENDIANNESS", "be")
170 else:
171 bb.error("Unable to determine endianness for architecture '%s'" %
172 d.getVar("HOST_ARCH"))
173 bb.fatal("Please add your architecture to siteinfo.bbclass")
174
175 if "bit-32" in sitedata:
176 d.setVar("SITEINFO_BITS", "32")
177 elif "bit-64" in sitedata:
178 d.setVar("SITEINFO_BITS", "64")
179 else:
180 bb.error("Unable to determine bit size for architecture '%s'" %
181 d.getVar("HOST_ARCH"))
182 bb.fatal("Please add your architecture to siteinfo.bbclass")
183}
184
185# Layers with siteconfig need to add a replacement path to this variable so the
186# sstate isn't path specific
187SITEINFO_PATHVARS = "COREBASE"
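# A layer shipping its own site files can define a variable holding its base
# path and append its name here, e.g. (hypothetical variable name):
#   SITEINFO_PATHVARS:append = " MYLAYERBASE"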
188
189def siteinfo_get_files(d, sysrootcache=False):
190 sitedata = siteinfo_data(d)
191 sitefiles = []
192 searched = []
193 for path in d.getVar("BBPATH").split(":"):
194 for element in sitedata:
195 filename = os.path.join(path, "site", element)
196 if os.path.exists(filename):
197 searched.append(filename + ":True")
198 sitefiles.append(filename)
199 else:
200 searched.append(filename + ":False")
201
202 # Have to parameterise out hardcoded paths such as COREBASE for the main site files
203 for var in d.getVar("SITEINFO_PATHVARS").split():
204 searched2 = []
205 replace = os.path.normpath(d.getVar(var))
206 for s in searched:
207 searched2.append(s.replace(replace, "${" + var + "}"))
208 searched = searched2
209
210 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
211        # We need sstate signatures for native/cross not to vary by arch, so we can't depend on the site files.
212        # In future we may want to depend upon all site files;
213        # that would currently break sstatetests.SStateTests.test_sstate_32_64_same_hash, for example.
214 searched = []
215
216 if not sysrootcache:
217 return sitefiles, searched
218
219 # Now check for siteconfig cache files in sysroots
220 path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
221 if path_siteconfig and os.path.isdir(path_siteconfig):
222 for i in os.listdir(path_siteconfig):
223 if not i.endswith("_config"):
224 continue
225 filename = os.path.join(path_siteconfig, i)
226 sitefiles.append(filename)
227 return sitefiles, searched
228
229#
230# Make some information available via variables
231#
232SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
diff --git a/meta/classes-recipe/syslinux.bbclass b/meta/classes-recipe/syslinux.bbclass
new file mode 100644
index 0000000000..be3b898b4d
--- /dev/null
+++ b/meta/classes-recipe/syslinux.bbclass
@@ -0,0 +1,194 @@
1# syslinux.bbclass
2# Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
3# SPDX-License-Identifier: MIT
4
5# Provide syslinux specific functions for building bootable images.
6
7# External variables (see the example after this list)
8# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
9# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
10# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
11# ${LABELS} - a list of targets for the automatic config
12# ${APPEND} - an override list of append strings for each label
13# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited
14# ${SYSLINUX_SPLASH} - A background image for the VGA boot menu, used when the boot menu is enabled
15# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
16# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
17# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
18# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
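#
# For example, an image recipe might set (illustrative values only):
#   LABELS = "boot install"
#   APPEND = "rootwait console=ttyS0,115200"
#   SYSLINUX_OPTS = "serial 0 115200;TOTALTIMEOUT 9000"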
19
20do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
21 syslinux-native:do_populate_sysroot"
22
23ISOLINUXDIR ?= "/isolinux"
24SYSLINUXDIR = "/"
25# The kernel has an internal default console, which you can override with
26# a console=...some_tty...
27SYSLINUX_DEFAULT_CONSOLE ?= ""
28SYSLINUX_SERIAL ?= "0 115200"
29SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
30SYSLINUX_PROMPT ?= "0"
31SYSLINUX_TIMEOUT ?= "50"
32AUTO_SYSLINUXMENU ?= "1"
33SYSLINUX_ALLOWOPTIONS ?= "1"
34SYSLINUX_ROOT ?= "${ROOT}"
35SYSLINUX_CFG_VM ?= "${S}/syslinux_vm.cfg"
36SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg"
37APPEND ?= ""
38
39# Need UUID utility code.
40inherit fs-uuid
41
42syslinux_populate() {
43 DEST=$1
44 BOOTDIR=$2
45 CFGNAME=$3
46
47 install -d ${DEST}${BOOTDIR}
48
49 # Install the config files
50 install -m 0644 ${SYSLINUX_CFG} ${DEST}${BOOTDIR}/${CFGNAME}
51 if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
52 install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
53 install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
54 install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
55 if [ "${SYSLINUX_SPLASH}" != "" ] ; then
56 install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
57 fi
58 fi
59}
60
61syslinux_iso_populate() {
62 iso_dir=$1
63 syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
64 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
65 install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
66}
67
68syslinux_hddimg_populate() {
69 hdd_dir=$1
70 syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
71 install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
72}
73
74syslinux_hddimg_install() {
75 syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
76}
77
78python build_syslinux_cfg () {
79 import copy
80 import sys
81
82 workdir = d.getVar('WORKDIR')
83 if not workdir:
84 bb.error("WORKDIR not defined, unable to package")
85 return
86
87 labels = d.getVar('LABELS')
88 if not labels:
89 bb.debug(1, "LABELS not defined, nothing to do")
90 return
91
92 if labels == []:
93 bb.debug(1, "No labels, nothing to do")
94 return
95
96 cfile = d.getVar('SYSLINUX_CFG')
97 if not cfile:
98 bb.fatal('Unable to read SYSLINUX_CFG')
99
100 try:
101 cfgfile = open(cfile, 'w')
102 except OSError:
103 bb.fatal('Unable to open %s' % cfile)
104
105 cfgfile.write('# Automatically created by OE\n')
106
107 opts = d.getVar('SYSLINUX_OPTS')
108
109 if opts:
110 for opt in opts.split(';'):
111 cfgfile.write('%s\n' % opt)
112
113 allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS')
114 if allowoptions:
115 cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions)
116 else:
117 cfgfile.write('ALLOWOPTIONS 1\n')
118
119 syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE')
120 syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY')
121 syslinux_serial = d.getVar('SYSLINUX_SERIAL')
122 if syslinux_serial:
123 cfgfile.write('SERIAL %s\n' % syslinux_serial)
124
125 menu = (d.getVar('AUTO_SYSLINUXMENU') == "1")
126
127 if menu and syslinux_serial:
128 cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
129 else:
130 cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
131
132 timeout = d.getVar('SYSLINUX_TIMEOUT')
133
134 if timeout:
135 cfgfile.write('TIMEOUT %s\n' % timeout)
136 else:
137 cfgfile.write('TIMEOUT 50\n')
138
139 prompt = d.getVar('SYSLINUX_PROMPT')
140 if prompt:
141 cfgfile.write('PROMPT %s\n' % prompt)
142 else:
143 cfgfile.write('PROMPT 1\n')
144
145 if menu:
146 cfgfile.write('ui vesamenu.c32\n')
147 cfgfile.write('menu title Select kernel options and boot kernel\n')
148 cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
149 splash = d.getVar('SYSLINUX_SPLASH')
150 if splash:
151 cfgfile.write('menu background splash.lss\n')
152
153 for label in labels.split():
154 localdata = bb.data.createCopy(d)
155
156 overrides = localdata.getVar('OVERRIDES')
157 if not overrides:
158 bb.fatal('OVERRIDES not defined')
159
160 localdata.setVar('OVERRIDES', label + ':' + overrides)
161
162 btypes = [ [ "", syslinux_default_console ] ]
163 if menu and syslinux_serial:
164 btypes = [ [ "Graphics console ", syslinux_default_console ],
165 [ "Serial console ", syslinux_serial_tty ] ]
166
167        root = d.getVar('SYSLINUX_ROOT')
168 if not root:
169 bb.fatal('SYSLINUX_ROOT not defined')
170
171 kernel = localdata.getVar('KERNEL_IMAGETYPE')
172 for btype in btypes:
173 cfgfile.write('LABEL %s%s\nKERNEL /%s\n' % (btype[0], label, kernel))
174
175 exargs = d.getVar('SYSLINUX_KERNEL_ARGS')
176 if exargs:
177 btype[1] += " " + exargs
178
179 append = localdata.getVar('APPEND')
180 initrd = localdata.getVar('INITRD')
181
182 append = root + " " + append
183 cfgfile.write('APPEND ')
184
185 if initrd:
186 cfgfile.write('initrd=/initrd ')
187
188            cfgfile.write('LABEL=%s ' % label)
189 append = replace_rootfs_uuid(d, append)
190 cfgfile.write('%s %s\n' % (append, btype[1]))
191
192 cfgfile.close()
193}
194build_syslinux_cfg[dirs] = "${S}"
diff --git a/meta/classes-recipe/systemd-boot-cfg.bbclass b/meta/classes-recipe/systemd-boot-cfg.bbclass
new file mode 100644
index 0000000000..366dd23738
--- /dev/null
+++ b/meta/classes-recipe/systemd-boot-cfg.bbclass
@@ -0,0 +1,77 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
8SYSTEMD_BOOT_ENTRIES ?= ""
9SYSTEMD_BOOT_TIMEOUT ?= "10"
10
11# Uses MACHINE specific KERNEL_IMAGETYPE
12PACKAGE_ARCH = "${MACHINE_ARCH}"
13
14# Need UUID utility code.
15inherit fs-uuid
16
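# build_efi_cfg below writes ${SYSTEMD_BOOT_CFG} plus one entry file per label
# in ${LABELS}; an entry for a "boot" label would look roughly like this
# (illustrative, assuming KERNEL_IMAGETYPE = "bzImage"):
#   title boot
#   linux /bzImage
#   options LABEL=boot <contents of APPEND>
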
17python build_efi_cfg() {
18 s = d.getVar("S")
19 labels = d.getVar('LABELS')
20 if not labels:
21 bb.debug(1, "LABELS not defined, nothing to do")
22 return
23
24 if labels == []:
25 bb.debug(1, "No labels, nothing to do")
26 return
27
28 cfile = d.getVar('SYSTEMD_BOOT_CFG')
29 cdir = os.path.dirname(cfile)
30 if not os.path.exists(cdir):
31 os.makedirs(cdir)
32 try:
33 cfgfile = open(cfile, 'w')
34 except OSError:
35 bb.fatal('Unable to open %s' % cfile)
36
37 cfgfile.write('# Automatically created by OE\n')
38 cfgfile.write('default %s\n' % (labels.split()[0]))
39 timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
40 if timeout:
41 cfgfile.write('timeout %s\n' % timeout)
42 else:
43 cfgfile.write('timeout 10\n')
44 cfgfile.close()
45
46 for label in labels.split():
47 localdata = d.createCopy()
48
49 entryfile = "%s/%s.conf" % (s, label)
50 if not os.path.exists(s):
51 os.makedirs(s)
52 d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
53 try:
54 entrycfg = open(entryfile, "w")
55 except OSError:
56 bb.fatal('Unable to open %s' % entryfile)
57
58 entrycfg.write('title %s\n' % label)
59
60 kernel = localdata.getVar("KERNEL_IMAGETYPE")
61 entrycfg.write('linux /%s\n' % kernel)
62
63 append = localdata.getVar('APPEND')
64 initrd = localdata.getVar('INITRD')
65
66 if initrd:
67 entrycfg.write('initrd /initrd\n')
68 lb = label
69 if label == "install":
70 lb = "install-efi"
71 entrycfg.write('options LABEL=%s ' % lb)
72 if append:
73 append = replace_rootfs_uuid(d, append)
74 entrycfg.write('%s' % append)
75 entrycfg.write('\n')
76 entrycfg.close()
77}
diff --git a/meta/classes-recipe/systemd-boot.bbclass b/meta/classes-recipe/systemd-boot.bbclass
new file mode 100644
index 0000000000..5aa32dd997
--- /dev/null
+++ b/meta/classes-recipe/systemd-boot.bbclass
@@ -0,0 +1,35 @@
1# Copyright (C) 2016 Intel Corporation
2#
3# SPDX-License-Identifier: MIT
4
5# systemd-boot.bbclass - "systemd-boot" is essentially gummiboot merged into systemd.
6# The original standalone gummiboot project is dead and no longer
7# maintained.
8#
9# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
10# (images built by image-live.bbclass)
11
12do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
13
14require conf/image-uefi.conf
15# Need UUID utility code.
16inherit fs-uuid
17
18efi_populate() {
19 efi_populate_common "$1" systemd
20
21	# systemd-boot requires these paths for configuration files;
22	# they are not customizable, so there is no point in new variables
23 install -d ${DEST}/loader
24 install -d ${DEST}/loader/entries
25 install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
26 for i in ${SYSTEMD_BOOT_ENTRIES}; do
27 install -m 0644 ${i} ${DEST}/loader/entries
28 done
29}
30
31efi_iso_populate:append() {
32 cp -r $iso_dir/loader ${EFIIMGDIR}
33}
34
35inherit systemd-boot-cfg
diff --git a/meta/classes-recipe/systemd.bbclass b/meta/classes-recipe/systemd.bbclass
new file mode 100644
index 0000000000..f6564c2b31
--- /dev/null
+++ b/meta/classes-recipe/systemd.bbclass
@@ -0,0 +1,239 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The list of packages that should have systemd packaging scripts added. For
8# each entry, optionally have a SYSTEMD_SERVICE:[package] that lists the service
9# files in this package. If this variable isn't set, [package].service is used.
10SYSTEMD_PACKAGES ?= "${PN}"
11SYSTEMD_PACKAGES:class-native ?= ""
12SYSTEMD_PACKAGES:class-nativesdk ?= ""
13
14# Whether to enable or disable the services on installation.
15SYSTEMD_AUTO_ENABLE ??= "enable"
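
# A typical recipe usage might look like (illustrative values):
#   SYSTEMD_PACKAGES = "${PN}"
#   SYSTEMD_SERVICE:${PN} = "myapp.service"
#   SYSTEMD_AUTO_ENABLE = "disable"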
16
17# This class will be inherited by any recipe that supports systemd init scripts,
18# even if systemd is not in DISTRO_FEATURES. As such, don't make any changes
19# unconditionally; check DISTRO_FEATURES first.
20python __anonymous() {
21 # If the distro features have systemd but not sysvinit, inhibit update-rcd
22 # from doing any work so that pure-systemd images don't have redundant init
23 # files.
24 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
25 d.appendVar("DEPENDS", " systemd-systemctl-native")
26 d.appendVar("PACKAGE_WRITE_DEPS", " systemd-systemctl-native")
27 if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
28 d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
29}
30
31systemd_postinst() {
32if systemctl >/dev/null 2>/dev/null; then
33 OPTS=""
34
35 if [ -n "$D" ]; then
36 OPTS="--root=$D"
37 fi
38
39 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
40 for service in ${SYSTEMD_SERVICE_ESCAPED}; do
41 systemctl ${OPTS} enable "$service"
42 done
43 fi
44
45 if [ -z "$D" ]; then
46 systemctl daemon-reload
47 systemctl preset ${SYSTEMD_SERVICE_ESCAPED}
48
49 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
50 systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
51 fi
52 fi
53fi
54}
55
56systemd_prerm() {
57if systemctl >/dev/null 2>/dev/null; then
58 if [ -z "$D" ]; then
59 systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
60
61 systemctl disable ${SYSTEMD_SERVICE_ESCAPED}
62 fi
63fi
64}
65
66
67systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
68systemd_populate_packages[vardepsexclude] += "OVERRIDES"
69
70
71python systemd_populate_packages() {
72 import re
73 import shlex
74
75 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
76 return
77
78 def get_package_var(d, var, pkg):
79 val = (d.getVar('%s:%s' % (var, pkg)) or "").strip()
80 if val == "":
81 val = (d.getVar(var) or "").strip()
82 return val
83
84    # Check that each systemd package appears in PACKAGES
85 def systemd_check_package(pkg_systemd):
86 packages = d.getVar('PACKAGES')
87 if not pkg_systemd in packages.split():
88 bb.error('%s does not appear in package list, please add it' % pkg_systemd)
89
90
91 def systemd_generate_package_scripts(pkg):
92 bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
93
94 paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE:' + pkg).split())
95 d.setVar('SYSTEMD_SERVICE_ESCAPED:' + pkg, paths_escaped)
96
97 # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE:pkg
98 # variable.
99 localdata = d.createCopy()
100 localdata.prependVar("OVERRIDES", pkg + ":")
101
102 postinst = d.getVar('pkg_postinst:%s' % pkg)
103 if not postinst:
104 postinst = '#!/bin/sh\n'
105 postinst += localdata.getVar('systemd_postinst')
106 d.setVar('pkg_postinst:%s' % pkg, postinst)
107
108 prerm = d.getVar('pkg_prerm:%s' % pkg)
109 if not prerm:
110 prerm = '#!/bin/sh\n'
111 prerm += localdata.getVar('systemd_prerm')
112 d.setVar('pkg_prerm:%s' % pkg, prerm)
113
114
115    # Add a file to FILES:*-systemd if it exists and hasn't already been added
116 def systemd_append_file(pkg_systemd, file_append):
117 appended = False
118 if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
119 var_name = "FILES:" + pkg_systemd
120 files = d.getVar(var_name, False) or ""
121 if file_append not in files.split():
122 d.appendVar(var_name, " " + file_append)
123 appended = True
124 return appended
125
126    # Add systemd files to FILES:*-systemd, parse them for Also= and follow the references recursively
127 def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
128 # avoid infinite recursion
129 if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
130 fullpath = oe.path.join(d.getVar("D"), path, service)
131 if service.find('.service') != -1:
132 # for *.service add *@.service
133 service_base = service.replace('.service', '')
134 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
135 if service.find('.socket') != -1:
136 # for *.socket add *.service and *@.service
137 service_base = service.replace('.socket', '')
138 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
139 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
140 for key in keys.split():
141 # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
142 cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, shlex.quote(fullpath), key)
143 pipe = os.popen(cmd, 'r')
144 line = pipe.readline()
145 while line:
146 line = line.replace('\n', '')
147 systemd_add_files_and_parse(pkg_systemd, path, line, keys)
148 line = pipe.readline()
149 pipe.close()
150
151 # Check service-files and call systemd_add_files_and_parse for each entry
152 def systemd_check_services():
153 searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
154 searchpaths.append(d.getVar("systemd_system_unitdir"))
155 systemd_packages = d.getVar('SYSTEMD_PACKAGES')
156
157 keys = 'Also'
158        # scan for all services listed in SYSTEMD_SERVICE
159 for pkg_systemd in systemd_packages.split():
160 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
161 path_found = ''
162
163 # Deal with adding, for example, 'ifplugd@eth0.service' from
164 # 'ifplugd@.service'
165 base = None
166 at = service.find('@')
167 if at != -1:
168 ext = service.rfind('.')
169 base = service[:at] + '@' + service[ext:]
170
171 for path in searchpaths:
172 if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
173 path_found = path
174 break
175 elif base is not None:
176 if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
177 path_found = path
178 break
179
180 if path_found != '':
181 systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
182 else:
183 bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format(
184 service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
185
186 def systemd_create_presets(pkg, action):
187 presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
188 bb.utils.mkdirhier(os.path.dirname(presetf))
189 with open(presetf, 'a') as fd:
190 for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split():
191 fd.write("%s %s\n" % (action,service))
192 d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
193
194 # Run all modifications once when creating package
195 if os.path.exists(d.getVar("D")):
196 for pkg in d.getVar('SYSTEMD_PACKAGES').split():
197 systemd_check_package(pkg)
198 if d.getVar('SYSTEMD_SERVICE:' + pkg):
199 systemd_generate_package_scripts(pkg)
200 action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
201 if action in ("enable", "disable"):
202 systemd_create_presets(pkg, action)
203 elif action not in ("mask", "preset"):
204 bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
205 systemd_check_services()
206}
207
208PACKAGESPLITFUNCS:prepend = "systemd_populate_packages "
209
210python rm_systemd_unitdir () {
211 import shutil
212 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
213 systemd_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_unitdir'))
214 if os.path.exists(systemd_unitdir):
215 shutil.rmtree(systemd_unitdir)
216 systemd_libdir = os.path.dirname(systemd_unitdir)
217 if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
218 os.rmdir(systemd_libdir)
219}
220
221python rm_sysvinit_initddir () {
222 import shutil
223 sysv_initddir = oe.path.join(d.getVar("D"), (d.getVar('INIT_D_DIR') or "/etc/init.d"))
224
225 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
226 not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
227 os.path.exists(sysv_initddir):
228 systemd_system_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_system_unitdir'))
229
230 # If systemd_system_unitdir contains anything, delete sysv_initddir
231 if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
232 shutil.rmtree(sysv_initddir)
233}
234
235do_install[postfuncs] += "${RMINITDIR} "
236RMINITDIR:class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
237RMINITDIR:class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
238RMINITDIR = ""
239
diff --git a/meta/classes-recipe/testimage.bbclass b/meta/classes-recipe/testimage.bbclass
new file mode 100644
index 0000000000..8d2fab21df
--- /dev/null
+++ b/meta/classes-recipe/testimage.bbclass
@@ -0,0 +1,508 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# SPDX-License-Identifier: MIT
4
5inherit metadata_scm
6inherit image-artifact-names
7
8# testimage.bbclass enables testing of qemu images using python unittests.
9# Most of the tests are commands run on the target image over ssh.
10# To use it, add testimage to the global inherit and call your target image with -c testimage
11# You can try it out like this:
12# - first add IMAGE_CLASSES += "testimage" in local.conf
13# - build a qemu core-image-sato
14# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
15#
16# The tests can be run automatically each time an image is built if you set
17# TESTIMAGE_AUTO = "1"
18
19TESTIMAGE_AUTO ??= "0"
20
21# You can set (or append to) TEST_SUITES in local.conf to select the tests
22# which you want to run for your target.
23# The test names are the module names in meta/lib/oeqa/runtime/cases.
24# Each name in TEST_SUITES represents a required test for the image (no skipping allowed).
25# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
26# Note that order in TEST_SUITES is relevant: tests are run in an order such that
27# tests mentioned in @skipUnlessPassed run before the tests that depend on them,
28# but without such dependencies, tests run in the order in which they are listed
29# in TEST_SUITES.
30#
31# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
32
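# A minimal local.conf sketch to enable and tailor this (illustrative values):
#   IMAGE_CLASSES += "testimage"
#   TEST_SUITES = "ping ssh df parselogs"
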
33# TEST_LOG_DIR contains the ssh command log and may contain information about which command is running, its output and return codes, and, for qemu, a boot log up to login.
34# Booting is handled by this class, and it's not a test in itself.
35# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
36# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
37# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
38# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
39# QEMU_USE_KVM can be set to "" to disable the use of kvm (by default it is enabled if target_arch == build_arch or both of them are x86 archs)
40
41# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting,
42# if a pattern is not specifically present in this variable, a default will be used when booting the target.
43# TESTIMAGE_BOOT_PATTERNS[<flag>] overrides the pattern used for that specific flag, where flag comes from a list of accepted flags
44# e.g. normally the system boots and waits for a login prompt (login:); after that it sends the command "root\n" to log in as the root user;
45# if we wanted to log in as the hypothetical "webserver" user for example we could set the following:
46# TESTIMAGE_BOOT_PATTERNS = "send_login_user search_login_succeeded"
47# TESTIMAGE_BOOT_PATTERNS[send_login_user] = "webserver\n"
48# TESTIMAGE_BOOT_PATTERNS[search_login_succeeded] = "webserver@[a-zA-Z0-9\-]+:~#"
49# The accepted flags are the following: search_reached_prompt, send_login_user, search_login_succeeded, search_cmd_finished.
50# They are prefixed with either search or send, to indicate whether the pattern is searched for in, or sent to, the target terminal
51
52TEST_LOG_DIR ?= "${WORKDIR}/testimage"
53
54TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
55TEST_INSTALL_TMP_DIR ?= "${WORKDIR}/testimage/install_tmp"
56TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages"
57TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
58TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
59
60BASICTESTSUITE = "\
61 ping date df ssh scp python perl gi ptest parselogs \
62 logrotate connman systemd oe_syslog pam stap ldd xorg \
63 kernelmodule gcc buildcpio buildlzip buildgalculator \
64 dnf rpm opkg apt weston go rust"
65
66DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
67
68# musl doesn't support systemtap
69DEFAULT_TEST_SUITES:remove:libc-musl = "stap"
70
71# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
72# mitigate this by removing build tests for qemumips machines.
73MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
74DEFAULT_TEST_SUITES:remove:qemumips = "${MIPSREMOVE}"
75DEFAULT_TEST_SUITES:remove:qemumips64 = "${MIPSREMOVE}"
76
77TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
78
79QEMU_USE_KVM ?= "1"
80TEST_QEMUBOOT_TIMEOUT ?= "1000"
81TEST_OVERALL_TIMEOUT ?= ""
82TEST_TARGET ?= "qemu"
83TEST_QEMUPARAMS ?= ""
84TEST_RUNQEMUPARAMS ?= ""
85
86TESTIMAGE_BOOT_PATTERNS ?= ""
87
88TESTIMAGEDEPENDS = ""
89TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
90TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
91TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
92TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
93TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}"
94TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
95
96TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
97TESTIMAGELOCK:qemuall = ""
98
99TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
100
101TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
102
103testimage_dump_target () {
104 top -bn1
105 ps
106 free
107 df
108 # The next command will export the default gateway IP
109 export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
110 ping -c3 $DEFAULT_GATEWAY
111 dmesg
112 netstat -an
113 ip address
114	# The next command dumps logs from /var/log/
115 find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
116}
117
118testimage_dump_host () {
119 top -bn1
120 iostat -x -z -N -d -p ALL 20 2
121 ps -ef
122 free
123 df
124 memstat
125 dmesg
126 ip -s link
127 netstat -an
128}
129
130testimage_dump_monitor () {
131 query-status
132 query-block
133 dump-guest-memory {"paging":false,"protocol":"file:%s.img"}
134}
135
136python do_testimage() {
137 testimage_main(d)
138}
139
140addtask testimage
141do_testimage[nostamp] = "1"
142do_testimage[network] = "1"
143do_testimage[depends] += "${TESTIMAGEDEPENDS}"
144do_testimage[lockfiles] += "${TESTIMAGELOCK}"
145
146def testimage_sanity(d):
147 if (d.getVar('TEST_TARGET') == 'simpleremote'
148 and (not d.getVar('TEST_TARGET_IP')
149 or not d.getVar('TEST_SERVER_IP'))):
150 bb.fatal('When TEST_TARGET is set to "simpleremote" '
151 'TEST_TARGET_IP and TEST_SERVER_IP are needed too.')
152
153def get_testimage_configuration(d, test_type, machine):
154 import platform
155 from oeqa.utils.metadata import get_layers
156 configuration = {'TEST_TYPE': test_type,
157 'MACHINE': machine,
158 'DISTRO': d.getVar("DISTRO"),
159 'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
160 'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
161 'STARTTIME': d.getVar("DATETIME"),
162 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
163 'LAYERS': get_layers(d.getVar("BBLAYERS"))}
164 return configuration
165get_testimage_configuration[vardepsexclude] = "DATETIME"
166
167def get_testimage_json_result_dir(d):
168 json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
169 custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
170 if custom_json_result_dir:
171 json_result_dir = custom_json_result_dir
172 return json_result_dir
173
174def get_testimage_result_id(configuration):
175 return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
176
177def get_testimage_boot_patterns(d):
178 from collections import defaultdict
179 boot_patterns = defaultdict(str)
180 # Only accept certain values
181 accepted_patterns = ['search_reached_prompt', 'send_login_user', 'search_login_succeeded', 'search_cmd_finished']
182    # Not all patterns need to be overridden, e.g. perhaps we only want to change the user
183 boot_patterns_flags = d.getVarFlags('TESTIMAGE_BOOT_PATTERNS') or {}
184 if boot_patterns_flags:
185 patterns_set = [p for p in boot_patterns_flags.items() if p[0] in d.getVar('TESTIMAGE_BOOT_PATTERNS').split()]
186 for flag, flagval in patterns_set:
187 if flag not in accepted_patterns:
188 bb.fatal('Testimage: The only accepted boot patterns are: search_reached_prompt,send_login_user, \
189 search_login_succeeded,search_cmd_finished\n Make sure your TESTIMAGE_BOOT_PATTERNS=%s \
190 contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS'))
191 return
192            # We know the boot prompt is searched for in binary format, the others might be expressions
193 if flag == 'search_reached_prompt':
194 boot_patterns[flag] = flagval.encode()
195 else:
196 boot_patterns[flag] = flagval.encode().decode('unicode-escape')
197 return boot_patterns
198
199
200def testimage_main(d):
201 import os
202 import json
203 import signal
204 import logging
205 import shutil
206
207 from bb.utils import export_proxies
208 from oeqa.runtime.context import OERuntimeTestContext
209 from oeqa.runtime.context import OERuntimeTestContextExecutor
210 from oeqa.core.target.qemu import supported_fstypes
211 from oeqa.core.utils.test import getSuiteCases
212 from oeqa.utils import make_logger_bitbake_compatible
213
214 def sigterm_exception(signum, stackframe):
215 """
216 Catch SIGTERM from worker in order to stop qemu.
217 """
218 os.kill(os.getpid(), signal.SIGINT)
219
220 def handle_test_timeout(timeout):
221        bb.warn("Global test timeout reached (%s seconds), stopping the tests." % timeout)
222 os.kill(os.getpid(), signal.SIGINT)
223
224 testimage_sanity(d)
225
226 if (d.getVar('IMAGE_PKGTYPE') == 'rpm'
227 and ('dnf' in d.getVar('TEST_SUITES') or 'auto' in d.getVar('TEST_SUITES'))):
228 create_rpm_index(d)
229
230 logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
231 pn = d.getVar("PN")
232
233 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR"))
234
235 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
236 d.getVar('IMAGE_LINK_NAME')))
237
238 tdname = "%s.testdata.json" % image_name
239 try:
240 with open(tdname, "r") as f:
241 td = json.load(f)
242 except FileNotFoundError as err:
243 bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
244
245    # Some variables need to be updated (mostly paths) with the
246    # ones from the current environment because some tests require them.
247 for var in d.getVar('TESTIMAGE_UPDATE_VARS').split():
248 td[var] = d.getVar(var)
249
250 image_manifest = "%s.manifest" % image_name
251 image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
252
253 extract_dir = d.getVar("TEST_EXTRACTED_DIR")
254
255 # Get machine
256 machine = d.getVar("MACHINE")
257
258 # Get rootfs
259 fstypes = d.getVar('IMAGE_FSTYPES').split()
260 if d.getVar("TEST_TARGET") == "qemu":
261 fstypes = [fs for fs in fstypes if fs in supported_fstypes]
262 if not fstypes:
263 bb.fatal('Unsupported image type built. Add a compatible image to '
264 'IMAGE_FSTYPES. Supported types: %s' %
265 ', '.join(supported_fstypes))
266 qfstype = fstypes[0]
267 qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
268 if qdeffstype:
269 qfstype = qdeffstype
270 rootfs = '%s.%s' % (image_name, qfstype)
271
272 # Get tmpdir (not really used, just for compatibility)
273 tmpdir = d.getVar("TMPDIR")
274
275 # Get deploy_dir_image (not really used, just for compatibility)
276 dir_image = d.getVar("DEPLOY_DIR_IMAGE")
277
278 # Get bootlog
279 bootlog = os.path.join(d.getVar("TEST_LOG_DIR"),
280 'qemu_boot_log.%s' % d.getVar('DATETIME'))
281
282 # Get display
283 display = d.getVar("BB_ORIGENV").getVar("DISPLAY")
284
285 # Get kernel
286 kernel_name = ('%s-%s.bin' % (d.getVar("KERNEL_IMAGETYPE"), machine))
287 kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), kernel_name)
288
289 # Get boottime
290 boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT"))
291
292 # Get use_kvm
293 kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
294
295 # Get OVMF
296 ovmf = d.getVar("QEMU_USE_OVMF")
297
298 slirp = False
299 if d.getVar("QEMU_USE_SLIRP"):
300 slirp = True
301
302 # TODO: We use the current implementation of qemu runner because of
303    # time constraints; the qemu runner really needs a refactor too.
304 target_kwargs = { 'machine' : machine,
305 'rootfs' : rootfs,
306 'tmpdir' : tmpdir,
307 'dir_image' : dir_image,
308 'display' : display,
309 'kernel' : kernel,
310 'boottime' : boottime,
311 'bootlog' : bootlog,
312 'kvm' : kvm,
313 'slirp' : slirp,
314 'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
315 'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()),
316 'ovmf' : ovmf,
317 'tmpfsdir' : d.getVar("RUNQEMU_TMPFS_DIR"),
318 }
319
320 if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
321 target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
322
323 # hardware controlled targets might need further access
324 target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
325 target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
326 target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
327 target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
328 target_kwargs['testimage_dump_monitor'] = d.getVar("testimage_dump_monitor") or ""
329 target_kwargs['testimage_dump_target'] = d.getVar("testimage_dump_target") or ""
330
331 def export_ssh_agent(d):
332 import os
333
334 variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
335 for v in variables:
336 if v not in os.environ.keys():
337 val = d.getVar(v)
338 if val is not None:
339 os.environ[v] = val
340
341 export_ssh_agent(d)
342
343    # runtime tests use the network to download projects for building
344 export_proxies(d)
345
346 # we need the host dumper in test context
347 host_dumper = OERuntimeTestContextExecutor.getHostDumper(
348 d.getVar("testimage_dump_host"),
349 d.getVar("TESTIMAGE_DUMP_DIR"))
350
351 # the robot dance
352 target = OERuntimeTestContextExecutor.getTarget(
353 d.getVar("TEST_TARGET"), logger, d.getVar("TEST_TARGET_IP"),
354 d.getVar("TEST_SERVER_IP"), **target_kwargs)
355
356 # test context
357 tc = OERuntimeTestContext(td, logger, target, host_dumper,
358 image_packages, extract_dir)
359
360 # Load tests before starting the target
361 test_paths = get_runtime_paths(d)
362 test_modules = d.getVar('TEST_SUITES').split()
363 if not test_modules:
364 bb.fatal('Empty test suite, please verify TEST_SUITES variable')
365
366 tc.loadTests(test_paths, modules=test_modules)
367
368 suitecases = getSuiteCases(tc.suites)
369 if not suitecases:
370 bb.fatal('Empty test suite, please verify TEST_SUITES variable')
371 else:
372 bb.debug(2, 'test suites:\n\t%s' % '\n\t'.join([str(c) for c in suitecases]))
373
374 package_extraction(d, tc.suites)
375
376 results = None
377 complete = False
378 orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
379 try:
380 # We need to check if runqemu ends unexpectedly
381        # or if the worker sends us a SIGTERM
382 tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS"))
383 import threading
384 try:
385 threading.Timer(int(d.getVar("TEST_OVERALL_TIMEOUT")), handle_test_timeout, (int(d.getVar("TEST_OVERALL_TIMEOUT")),)).start()
386 except ValueError:
387 pass
388 results = tc.runTests()
389 complete = True
390 except (KeyboardInterrupt, BlockingIOError) as err:
391 if isinstance(err, KeyboardInterrupt):
392 bb.error('testimage interrupted, shutting down...')
393 else:
394 bb.error('runqemu failed, shutting down...')
395 if results:
396 results.stop()
397 results = tc.results
398 finally:
399 signal.signal(signal.SIGTERM, orig_sigterm_handler)
400 tc.target.stop()
401
402 # Show results (if we have them)
403 if results:
404 configuration = get_testimage_configuration(d, 'runtime', machine)
405 results.logDetails(get_testimage_json_result_dir(d),
406 configuration,
407 get_testimage_result_id(configuration),
408 dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
409 results.logSummary(pn)
410
411 # Copy additional logs to tmp/log/oeqa so it's easier to find them
412 targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
413 os.makedirs(targetdir, exist_ok=True)
414 os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
415 os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
416
417 if not results or not complete:
418 bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
419 if not results.wasSuccessful():
420 bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
421
422def get_runtime_paths(d):
423 """
424    Returns a list of paths where runtime tests must reside.
425
426 Runtime tests are expected in <LAYER_DIR>/lib/oeqa/runtime/cases/
427 """
428 paths = []
429
430 for layer in d.getVar('BBLAYERS').split():
431 path = os.path.join(layer, 'lib/oeqa/runtime/cases')
432 if os.path.isdir(path):
433 paths.append(path)
434 return paths
435
436def create_index(arg):
437 import subprocess
438
439 index_cmd = arg
440 try:
441 bb.note("Executing '%s' ..." % index_cmd)
442 result = subprocess.check_output(index_cmd,
443 stderr=subprocess.STDOUT,
444 shell=True)
445 result = result.decode('utf-8')
446 except subprocess.CalledProcessError as e:
447 return("Index creation command '%s' failed with return code "
448 '%d:\n%s' % (e.cmd, e.returncode, e.output.decode("utf-8")))
449 if result:
450 bb.note(result)
451 return None
452
453def create_rpm_index(d):
454 import glob
455 # Index RPMs
456 rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c")
457 index_cmds = []
458 archs = (d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or '').replace('-', '_')
459
460 for arch in archs.split():
461 rpm_dir = os.path.join(d.getVar('DEPLOY_DIR_RPM'), arch)
462 idx_path = os.path.join(d.getVar('WORKDIR'), 'oe-testimage-repo', arch)
463
464 if not os.path.isdir(rpm_dir):
465 continue
466
467 lockfilename = os.path.join(d.getVar('DEPLOY_DIR_RPM'), 'rpm.lock')
468 lf = bb.utils.lockfile(lockfilename, False)
469 oe.path.copyhardlinktree(rpm_dir, idx_path)
470 # Full indexes overload a 256MB image so reduce the number of rpms
471 # in the feed by filtering to specific packages needed by the tests.
472 package_list = glob.glob(idx_path + "*/*.rpm")
473
474 for pkg in package_list:
475 if os.path.basename(pkg).startswith(("curl-ptest")):
476 bb.utils.remove(pkg)
477
478 if not os.path.basename(pkg).startswith(("rpm", "run-postinsts", "busybox", "bash", "update-alternatives", "libc6", "curl", "musl")):
479 bb.utils.remove(pkg)
480
481 bb.utils.unlockfile(lf)
482 cmd = '%s --update -q %s' % (rpm_createrepo, idx_path)
483
484 # Create repodata
485 result = create_index(cmd)
486 if result:
487            bb.fatal(result)
488
489def package_extraction(d, test_suites):
490 from oeqa.utils.package_manager import find_packages_to_extract
491 from oeqa.utils.package_manager import extract_packages
492
493 bb.utils.remove(d.getVar("TEST_NEEDED_PACKAGES_DIR"), recurse=True)
494 packages = find_packages_to_extract(test_suites)
495 if packages:
496 bb.utils.mkdirhier(d.getVar("TEST_INSTALL_TMP_DIR"))
497 bb.utils.mkdirhier(d.getVar("TEST_PACKAGED_DIR"))
498 bb.utils.mkdirhier(d.getVar("TEST_EXTRACTED_DIR"))
499 extract_packages(d, packages)
500
501testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME"
502
503python () {
504 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
505 bb.build.addtask("testimage", "do_build", "do_image_complete", d)
506}
507
508inherit testsdk
diff --git a/meta/classes-recipe/testsdk.bbclass b/meta/classes-recipe/testsdk.bbclass
new file mode 100644
index 0000000000..fd82e6ef41
--- /dev/null
+++ b/meta/classes-recipe/testsdk.bbclass
@@ -0,0 +1,52 @@
1# Copyright (C) 2013 - 2016 Intel Corporation
2#
3# SPDX-License-Identifier: MIT
4
5# testsdk.bbclass enables testing for SDK and Extensible SDK
6#
7# To run SDK tests, run the commands:
8# $ bitbake <image-name> -c populate_sdk
9# $ bitbake <image-name> -c testsdk
10#
11# To run eSDK tests, run the commands:
12# $ bitbake <image-name> -c populate_sdk_ext
13# $ bitbake <image-name> -c testsdkext
14#
15# where "<image-name>" is an image like core-image-sato.
16
17TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK"
18TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt"
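
# A layer can substitute its own harness by pointing one of these at a
# different class, e.g. (hypothetical module and class names):
#   TESTSDK_CLASS_NAME = "mylayer.sdk.testsdk.MyTestSDK"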
19
20def import_and_run(name, d):
21 import importlib
22
23 class_name = d.getVar(name)
24 if class_name:
25 module, cls = class_name.rsplit('.', 1)
26 m = importlib.import_module(module)
27 c = getattr(m, cls)()
28 c.run(d)
29 else:
30 bb.warn('No tests were run because %s did not define a class' % name)
31
32import_and_run[vardepsexclude] = "DATETIME BB_ORIGENV"
33
34python do_testsdk() {
35 import_and_run('TESTSDK_CLASS_NAME', d)
36}
37addtask testsdk
38do_testsdk[nostamp] = "1"
39do_testsdk[network] = "1"
40
41python do_testsdkext() {
42 import_and_run('TESTSDKEXT_CLASS_NAME', d)
43}
44addtask testsdkext
45do_testsdkext[nostamp] = "1"
46do_testsdkext[network] = "1"
47
48python () {
49 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
50 bb.build.addtask("testsdk", None, "do_populate_sdk", d)
51 bb.build.addtask("testsdkext", None, "do_populate_sdk_ext", d)
52}
diff --git a/meta/classes-recipe/texinfo.bbclass b/meta/classes-recipe/texinfo.bbclass
new file mode 100644
index 0000000000..380247faf5
--- /dev/null
+++ b/meta/classes-recipe/texinfo.bbclass
@@ -0,0 +1,24 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is inherited by recipes whose upstream packages invoke the
8# texinfo utilities at build-time. Native and cross recipes are made to use the
9# dummy scripts provided by texinfo-dummy-native, for improved performance.
10# Target architecture recipes use the genuine Texinfo utilities. By default,
11# they use the Texinfo utilities on the host system. If you want to use the
12# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
13# makeinfo from SANITY_REQUIRED_UTILITIES.
14
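# e.g. in local.conf (illustrative):
#   ASSUME_PROVIDED:remove = "texinfo-native"
#   SANITY_REQUIRED_UTILITIES:remove = "makeinfo"
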
15TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
16TEXDEP:class-native = "texinfo-dummy-native"
17TEXDEP:class-cross = "texinfo-dummy-native"
18TEXDEP:class-crosssdk = "texinfo-dummy-native"
19TEXDEP:class-cross-canadian = "texinfo-dummy-native"
20DEPENDS:append = " ${TEXDEP}"
21
22# libtool-cross doesn't inherit cross
23TEXDEP:pn-libtool-cross = "texinfo-dummy-native"
24
diff --git a/meta/classes-recipe/toolchain-scripts-base.bbclass b/meta/classes-recipe/toolchain-scripts-base.bbclass
new file mode 100644
index 0000000000..d24a986e02
--- /dev/null
+++ b/meta/classes-recipe/toolchain-scripts-base.bbclass
@@ -0,0 +1,17 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This function creates a version information file
8toolchain_create_sdk_version () {
9 local versionfile=$1
10 rm -f $versionfile
11 touch $versionfile
12 echo 'Distro: ${DISTRO}' >> $versionfile
13 echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
14 echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
15 echo 'Timestamp: ${DATETIME}' >> $versionfile
16}
17toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
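
# An invocation passes the path of the version file to create, e.g.
# (illustrative): toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${TARGET_SYS}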
diff --git a/meta/classes-recipe/toolchain-scripts.bbclass b/meta/classes-recipe/toolchain-scripts.bbclass
new file mode 100644
index 0000000000..3cc823fe63
--- /dev/null
+++ b/meta/classes-recipe/toolchain-scripts.bbclass
@@ -0,0 +1,236 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit toolchain-scripts-base siteinfo kernel-arch
8
9# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
10# doesn't always match our expectations... but we default to the stock value
11REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
12TARGET_CC_ARCH:append:libc-musl = " -mmusl"
13
14# default debug prefix map isn't valid in the SDK
15DEBUG_PREFIX_MAP = ""
16
17EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
18
19# This function creates an environment-setup-script for use in a deployable SDK
20toolchain_create_sdk_env_script () {
21 # Create environment setup script. Remember that $SDKTARGETSYSROOT should
22 # only be expanded on the target at runtime.
23 base_sbindir=${10:-${base_sbindir_nativesdk}}
24 base_bindir=${9:-${base_bindir_nativesdk}}
25 sbindir=${8:-${sbindir_nativesdk}}
26 sdkpathnative=${7:-${SDKPATHNATIVE}}
27 prefix=${6:-${prefix_nativesdk}}
28 bindir=${5:-${bindir_nativesdk}}
29 libdir=${4:-${libdir}}
30 sysroot=${3:-${SDKTARGETSYSROOT}}
31 multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
32 script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
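	# All of the above parameters are optional; a caller can override any
	# default positionally, passing "" to keep a default, e.g. (illustrative):
	#   toolchain_create_sdk_env_script "" "" /opt/sdk/target-sysroot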
33 rm -f $script
34 touch $script
35
36	echo '# Check for LD_LIBRARY_PATH being set, which can break the SDK and generally is a bad practice' >> $script
37 echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
38 echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
39	echo '# Only disable this check if you absolutely know what you are doing!' >> $script
40 echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
41 echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
42 echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
43 echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
44 echo ' echo "For more references see:"' >> $script
45 echo ' echo " http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80"' >> $script
46 echo ' echo " http://xahlee.info/UnixResource_dir/_/ldpath.html"' >> $script
47 echo ' return 1' >> $script
48 echo 'fi' >> $script
49
50 echo "${EXPORT_SDK_PS1}" >> $script
51 echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
52 EXTRAPATH=""
53 for i in ${CANADIANEXTRAOS}; do
54 EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
55 done
56 echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
57 echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
58 echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
59 echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
60 echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
61 echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
62 echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
63 echo 'export OECORE_BASELIB="${baselib}"' >> $script
64 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
65 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
66
67 echo 'unset command_not_found_handle' >> $script
68
69 toolchain_shared_env_script
70}
71
72# This function creates an environment-setup-script in B which enables
73# an OE-core IDE to integrate with the build tree
74# Caller must ensure CONFIG_SITE is set up
75toolchain_create_tree_env_script () {
76 script=${B}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
77 rm -f $script
78 touch $script
79 echo 'standalone_sysroot_target="${STAGING_DIR}/${MACHINE}"' >> $script
80 echo 'standalone_sysroot_native="${STAGING_DIR}/${BUILD_ARCH}"' >> $script
81 echo 'orig=`pwd`; cd ${COREBASE}; . ./oe-init-build-env ${TOPDIR}; cd $orig' >> $script
82 echo 'export PATH=$standalone_sysroot_native/${bindir_native}:$standalone_sysroot_native/${bindir_native}/${TARGET_SYS}:$PATH' >> $script
83 echo 'export PKG_CONFIG_SYSROOT_DIR=$standalone_sysroot_target' >> $script
84 echo 'export PKG_CONFIG_PATH=$standalone_sysroot_target'"$libdir"'/pkgconfig:$standalone_sysroot_target'"$prefix"'/share/pkgconfig' >> $script
85 echo 'export CONFIG_SITE="${CONFIG_SITE}"' >> $script
86 echo 'export SDKTARGETSYSROOT=$standalone_sysroot_target' >> $script
87 echo 'export OECORE_NATIVE_SYSROOT=$standalone_sysroot_native' >> $script
88 echo 'export OECORE_TARGET_SYSROOT=$standalone_sysroot_target' >> $script
89 echo 'export OECORE_ACLOCAL_OPTS="-I $standalone_sysroot_native/usr/share/aclocal"' >> $script
90 echo 'export OECORE_BASELIB="${baselib}"' >> $script
91 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
92 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
93
94 toolchain_shared_env_script
95
96 cat >> $script <<EOF
97
98if [ -d "\$OECORE_NATIVE_SYSROOT/${datadir}/post-relocate-setup.d/" ]; then
99 for s in \$OECORE_NATIVE_SYSROOT/${datadir}/post-relocate-setup.d/*; do
100 if [ ! -x \$s ]; then
101 continue
102 fi
103 \$s "\$1"
104 status=\$?
105 if [ \$status != 0 ]; then
106 echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
107 exit \$status
108 fi
109 done
110fi
111EOF
112}
113
114toolchain_shared_env_script () {
115 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
116 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
117 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
118 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
119 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
120 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
121 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
122 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
123 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
124 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
125 echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
126 echo 'export AR=${TARGET_PREFIX}ar' >> $script
127 echo 'export NM=${TARGET_PREFIX}nm' >> $script
128 echo 'export M4=m4' >> $script
129 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
130 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script
131 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
132 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
133 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
134 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
135 echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script
136 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
137 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
138 echo 'export ARCH=${ARCH}' >> $script
139 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
140 echo 'export OECORE_TUNE_CCARGS="${TUNE_CCARGS}"' >> $script
141
142 cat >> $script <<EOF
143
144# Append environment subscripts
145if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
146 for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
147 . \$envfile
148 done
149fi
150if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
151 for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
152 . \$envfile
153 done
154fi
155EOF
156}
157
158toolchain_create_post_relocate_script() {
159 relocate_script=$1
160 env_dir=$2
161 rm -f $relocate_script
162 touch $relocate_script
163
164 cat >> $relocate_script <<EOF
165if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then
166 # Source top-level SDK env scripts in case they are needed for the relocate
167 # scripts.
168 for env_setup_script in ${env_dir}/environment-setup-*; do
169 . \$env_setup_script
170 status=\$?
171 if [ \$status != 0 ]; then
172 echo "\$0: Failed to source \$env_setup_script with status \$status"
173 exit \$status
174 fi
175
176 for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*; do
177 if [ ! -x \$s ]; then
178 continue
179 fi
180 \$s "\$1"
181 status=\$?
182 if [ \$status != 0 ]; then
183 echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
184 exit \$status
185 fi
186 done
187 done
188 rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d"
189fi
190EOF
191}
192
193# We get the cached site config at runtime
194TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}"
195TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
196TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
197DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
198
199# This function creates a site config file
200toolchain_create_sdk_siteconfig () {
201 local siteconfig=$1
202
203 rm -f $siteconfig
204 touch $siteconfig
205
206 for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
207 cat $sitefile >> $siteconfig
208 done
209
210 #get cached site config
211 for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
212 # Resolve virtual/* names to the real recipe name using sysroot-providers info
213 case $sitefile in virtual/*)
214 sitefile=`echo $sitefile | tr / _`
215 sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile`
216 esac
217
218 if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
219 cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
220 fi
221 done
222}
223# The immediate expansion above can result in unwanted path dependencies here
224toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
225
226python __anonymous () {
227 import oe.classextend
228 deps = ""
229 for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE') or "").split():
230 deps += " %s:do_populate_sysroot" % dep
231 for variant in (d.getVar('MULTILIB_VARIANTS') or "").split():
232 clsextend = oe.classextend.ClassExtender(variant, d)
233 newdep = clsextend.extend_name(dep)
234 deps += " %s:do_populate_sysroot" % newdep
235 d.appendVarFlag('do_configure', 'depends', deps)
236}
diff --git a/meta/classes-recipe/uboot-config.bbclass b/meta/classes-recipe/uboot-config.bbclass
new file mode 100644
index 0000000000..9889d026fa
--- /dev/null
+++ b/meta/classes-recipe/uboot-config.bbclass
@@ -0,0 +1,133 @@
1# Handle U-Boot config for a machine
2#
3# The format to specify it, in the machine, is:
4#
5# UBOOT_CONFIG ??= <default>
6# UBOOT_CONFIG[foo] = "config,images,binary"
7#
8# or
9#
10# UBOOT_MACHINE = "config"
11#
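# For example (an illustrative sketch only; the board and config names
# below are hypothetical, not provided by this class):
#
#   UBOOT_CONFIG ??= "sd emmc"
#   UBOOT_CONFIG[sd] = "someboard_sd_config,sdcard,u-boot.img"
#   UBOOT_CONFIG[emmc] = "someboard_emmc_config"
#
# Here "sd" appends its config to UBOOT_MACHINE, "sdcard" to IMAGE_FSTYPES
# and "u-boot.img" to UBOOT_BINARIES, while "emmc" only selects a config
# and falls back to the default UBOOT_BINARY.
#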
12# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
13#
14# SPDX-License-Identifier: MIT
15
16
17def removesuffix(s, suffix):
18 if suffix and s.endswith(suffix):
19 return s[:-len(suffix)]
20 return s
21
22# Some versions of u-boot use .bin and others use .img. By default use .bin
23# but enable individual recipes to change this value.
24UBOOT_SUFFIX ??= "bin"
25UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
26UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}"
27UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
28UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}"
29UBOOT_MAKE_TARGET ?= "all"
30
31# Output the generated ELF. Some platforms can use the ELF file and load it
32# directly (JTAG booting, QEMU); additionally, the ELF can be used for
33# debugging purposes.
34UBOOT_ELF ?= ""
35UBOOT_ELF_SUFFIX ?= "elf"
36UBOOT_ELF_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.${UBOOT_ELF_SUFFIX}"
37UBOOT_ELF_BINARY ?= "u-boot.${UBOOT_ELF_SUFFIX}"
38UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}"
39
40# Some versions of u-boot build an SPL (Secondary Program Loader) image that
41# should be packaged along with the u-boot binary as well as placed in the
42# deploy directory. Recipes for those versions can set the following
43# variables to allow packaging the SPL.
44SPL_SUFFIX ?= ""
45SPL_BINARY ?= ""
46SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}"
47SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}"
48SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}"
49SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}${SPL_DELIMITER}${SPL_SUFFIX}"
50SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}"
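#
# For example (an illustrative sketch only; the path below is hypothetical),
# a recipe whose U-Boot build produces an SPL might set:
#
#   SPL_BINARY = "spl/u-boot-spl.bin"
#   SPL_SUFFIX = "bin"
#
# which yields an SPL_IMAGE of the form "u-boot-spl-<machine>-<pv>-<pr>.bin".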
51
52# Additional environment variables or a script can be installed alongside
53# u-boot to be used automatically on boot. This file, typically 'uEnv.txt'
54# or 'boot.scr', should be packaged along with u-boot as well as placed in the
55# deploy directory. Machine configurations needing one of these files should
56# include it in the SRC_URI and set the UBOOT_ENV parameter.
57UBOOT_ENV_SUFFIX ?= "txt"
58UBOOT_ENV ?= ""
59UBOOT_ENV_SRC_SUFFIX ?= "cmd"
60UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}"
61UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
62UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
63UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
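#
# For example (an illustrative sketch only; the file names are hypothetical),
# a machine wanting a compiled boot script might set:
#
#   UBOOT_ENV = "boot"
#   UBOOT_ENV_SUFFIX = "scr"
#   SRC_URI += "file://boot.cmd"
#
# so that UBOOT_ENV_SRC resolves to "boot.cmd" and UBOOT_ENV_BINARY to
# "boot.scr".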
64
65# Default name of u-boot initial env, but enable individual recipes to change
66# this value.
67UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
68
69# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
70# to find EXTLINUX conf file.
71UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
72UBOOT_EXTLINUX_CONF_NAME ?= "extlinux.conf"
73UBOOT_EXTLINUX_SYMLINK ?= "${UBOOT_EXTLINUX_CONF_NAME}-${MACHINE}-${PR}"
74
75# Options for the device tree compiler passed to mkimage '-D' feature:
76UBOOT_MKIMAGE_DTCOPTS ??= ""
77SPL_MKIMAGE_DTCOPTS ??= ""
78
79# mkimage command
80UBOOT_MKIMAGE ?= "uboot-mkimage"
81UBOOT_MKIMAGE_SIGN ?= "${UBOOT_MKIMAGE}"
82
83# Arguments passed to mkimage for signing
84UBOOT_MKIMAGE_SIGN_ARGS ?= ""
85SPL_MKIMAGE_SIGN_ARGS ?= ""
86
87# Options to deploy the u-boot device tree
88UBOOT_DTB ?= ""
89UBOOT_DTB_BINARY ??= ""
90
91python () {
92 ubootmachine = d.getVar("UBOOT_MACHINE")
93 ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
94 ubootbinary = d.getVar('UBOOT_BINARY')
95 ubootbinaries = d.getVar('UBOOT_BINARIES')
96 # The "doc" varflag is special, we don't want to see it here
97 ubootconfigflags.pop('doc', None)
98 ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
99
100 if not ubootmachine and not ubootconfig:
101 PN = d.getVar("PN")
102 FILE = os.path.basename(d.getVar("FILE"))
103 bb.debug(1, "To build %s, see %s for instructions on \
104 setting up your machine config" % (PN, FILE))
105 raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
106
107 if ubootmachine and ubootconfig:
108 raise bb.parse.SkipRecipe("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
109
110 if ubootconfigflags and ubootbinaries:
111 raise bb.parse.SkipRecipe("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
112
113 if len(ubootconfig) > 0:
114 for config in ubootconfig:
115 for f, v in ubootconfigflags.items():
116 if config == f:
117 items = v.split(',')
118 if items[0] and len(items) > 3:
119 raise bb.parse.SkipRecipe('Only config,images,binary can be specified!')
120 d.appendVar('UBOOT_MACHINE', ' ' + items[0])
121 # IMAGE_FSTYPES appending
122 if len(items) > 1 and items[1]:
123 bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
124 d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
125 if len(items) > 2 and items[2]:
126 bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % items[2])
127 d.appendVar('UBOOT_BINARIES', ' ' + items[2])
128 else:
129 bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary)
130 d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary)
131 return
132 raise bb.parse.SkipRecipe("The selected UBOOT_CONFIG key %s has no match in %s." % (ubootconfig, ubootconfigflags.keys()))
133}
diff --git a/meta/classes-recipe/uboot-extlinux-config.bbclass b/meta/classes-recipe/uboot-extlinux-config.bbclass
new file mode 100644
index 0000000000..86a7d30ca0
--- /dev/null
+++ b/meta/classes-recipe/uboot-extlinux-config.bbclass
@@ -0,0 +1,158 @@
1# uboot-extlinux-config.bbclass
2#
3# This class allows generating the extlinux.conf used by U-Boot. U-Boot
4# support for it is provided so that OpenEmbedded-based products can use
5# the Generic Distribution Configuration specification.
6#
7# External variables:
8#
9# UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot
10# default console.
11# UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config.
12# UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments.
13# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
14# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
15# UBOOT_EXTLINUX_FDT - Device tree file.
16# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
17# concatenate and use as an initrd (optional).
18# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
19# UBOOT_EXTLINUX_ROOT - Root kernel cmdline.
20# UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made.
21# Measured in 1/10 of a second.
22# UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after
23# the timeout period
24#
25# If there's only one label, the system will boot automatically and no menu
26# will be created. If you want to use more than one label, e.g. linux and
27# alternate, use overrides to set the menu description, console and other variables.
28#
29# Ex:
30#
31# UBOOT_EXTLINUX_LABELS ??= "default fallback"
32#
33# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
34# UBOOT_EXTLINUX_TIMEOUT ??= "30"
35#
36# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
37# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
38#
39# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
40# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
41#
42# Results:
43#
44# menu title Select the boot mode
45# TIMEOUT 30
46# DEFAULT Linux Default
47# LABEL Linux Default
48# KERNEL ../zImage
49# FDTDIR ../
50# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
51# LABEL Linux Fallback
52# KERNEL ../zImage-fallback
53# FDTDIR ../
54# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
55#
56# Copyright (C) 2016, O.S. Systems Software LTDA. All Rights Reserved
57# SPDX-License-Identifier: MIT
58#
59# The kernel has an internal default console, which you can override with
60# a console=...some_tty...
61UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}"
62UBOOT_EXTLINUX_LABELS ??= "linux"
63UBOOT_EXTLINUX_FDT ??= ""
64UBOOT_EXTLINUX_FDTDIR ??= "../"
65UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
66UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
67UBOOT_EXTLINUX_MENU_DESCRIPTION:linux ??= "${DISTRO_NAME}"
68
69UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
70
71python do_create_extlinux_config() {
72 if d.getVar("UBOOT_EXTLINUX") != "1":
73 return
74
75 if not d.getVar('WORKDIR'):
76 bb.error("WORKDIR not defined, unable to package")
77
78 labels = d.getVar('UBOOT_EXTLINUX_LABELS')
79 if not labels:
80 bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do")
81
82 if not labels.strip():
83 bb.fatal("No labels, nothing to do")
84
85 cfile = d.getVar('UBOOT_EXTLINUX_CONFIG')
86 if not cfile:
87 bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG')
88
89 localdata = bb.data.createCopy(d)
90
91 try:
92 with open(cfile, 'w') as cfgfile:
93 cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
94
95 if len(labels.split()) > 1:
96 cfgfile.write('menu title Select the boot mode\n')
97
98 timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
99 if timeout:
100 cfgfile.write('TIMEOUT %s\n' % (timeout))
101
102 if len(labels.split()) > 1:
103 default = localdata.getVar('UBOOT_EXTLINUX_DEFAULT_LABEL')
104 if default:
105 cfgfile.write('DEFAULT %s\n' % (default))
106
107 # Need to deconflict the labels with existing overrides
108 label_overrides = labels.split()
109 default_overrides = localdata.getVar('OVERRIDES').split(':')
110 # We're keeping all the existing overrides that aren't used as a label;
111 # an override for that label will be added back in while we're processing that label
112 keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides))
113
114 for label in labels.split():
115
116 localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label]))
117
118 extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
119
120 menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION')
121 if not menu_description:
122 menu_description = label
123
124 root = localdata.getVar('UBOOT_EXTLINUX_ROOT')
125 if not root:
126 bb.fatal('UBOOT_EXTLINUX_ROOT not defined')
127
128 kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE')
129 fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR')
130
131 fdt = localdata.getVar('UBOOT_EXTLINUX_FDT')
132
133 if fdt:
134 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n' %
135 (menu_description, kernel_image, fdt))
136 elif fdtdir:
137 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' %
138 (menu_description, kernel_image, fdtdir))
139 else:
140 cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
141
142 kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS')
143
144 initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD')
145 if initrd:
146 cfgfile.write('\tINITRD %s\n'% initrd)
147
148 kernel_args = root + " " + kernel_args
149 cfgfile.write('\tAPPEND %s %s\n' % (kernel_args, extlinux_console))
150
151 except OSError:
152 bb.fatal('Unable to open %s' % (cfile))
153}
154UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
155do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
156do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
157
158addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes-recipe/uboot-sign.bbclass b/meta/classes-recipe/uboot-sign.bbclass
new file mode 100644
index 0000000000..debbf23ec6
--- /dev/null
+++ b/meta/classes-recipe/uboot-sign.bbclass
@@ -0,0 +1,505 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This file is part of U-Boot verified boot support and is intended to be
8# inherited from u-boot recipe and from kernel-fitimage.bbclass.
9#
10# The signature procedure requires the user to generate an RSA key and
11# certificate in a directory and to define the following variable:
12#
13# UBOOT_SIGN_KEYDIR = "/keys/directory"
14# UBOOT_SIGN_KEYNAME = "dev" # keys name in keydir (eg. "dev.crt", "dev.key")
15# UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
16# UBOOT_SIGN_ENABLE = "1"
17#
18# As verified boot depends on fitImage generation, following is also required:
19#
20# KERNEL_CLASSES ?= " kernel-fitimage "
21# KERNEL_IMAGETYPE ?= "fitImage"
22#
23# The signature support is limited to the use of CONFIG_OF_SEPARATE in U-Boot.
24#
25# The task sequence is set as below, using DEPLOY_IMAGE_DIR as a common
26# place to handle the device tree blob:
27#
28# * u-boot:do_install:append
29# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
30# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
31#
32# * virtual/kernel:do_assemble_fitimage
33# Sign the image
34#
35# * u-boot:do_deploy[postfuncs]
36# Deploy files like UBOOT_DTB_IMAGE, UBOOT_DTB_SYMLINK and others.
37#
38# For more details on signature process, please refer to U-Boot documentation.
39
40# We need some variables from uboot-config
41inherit uboot-config
42
43# Enable use of a U-Boot fitImage
44UBOOT_FITIMAGE_ENABLE ?= "0"
45
46# Signature activation - these require their respective fitImages
47UBOOT_SIGN_ENABLE ?= "0"
48SPL_SIGN_ENABLE ?= "0"
49
50# Default value for deployment filenames.
51UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
52UBOOT_DTB_BINARY ?= "u-boot.dtb"
53UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
54UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.bin"
55UBOOT_NODTB_BINARY ?= "u-boot-nodtb.bin"
56UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.bin"
57UBOOT_ITS_IMAGE ?= "u-boot-its-${MACHINE}-${PV}-${PR}"
58UBOOT_ITS ?= "u-boot.its"
59UBOOT_ITS_SYMLINK ?= "u-boot-its-${MACHINE}"
60UBOOT_FITIMAGE_IMAGE ?= "u-boot-fitImage-${MACHINE}-${PV}-${PR}"
61UBOOT_FITIMAGE_BINARY ?= "u-boot-fitImage"
62UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}"
63SPL_DIR ?= "spl"
64SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb"
65SPL_DTB_BINARY ?= "u-boot-spl.dtb"
66SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb"
67SPL_NODTB_IMAGE ?= "u-boot-spl-nodtb-${MACHINE}-${PV}-${PR}.bin"
68SPL_NODTB_BINARY ?= "u-boot-spl-nodtb.bin"
69SPL_NODTB_SYMLINK ?= "u-boot-spl-nodtb-${MACHINE}.bin"
70
71# U-Boot fitImage description
72UBOOT_FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
73
74# Kernel / U-Boot fitImage Hash Algo
75FIT_HASH_ALG ?= "sha256"
76UBOOT_FIT_HASH_ALG ?= "sha256"
77
78# Kernel / U-Boot fitImage Signature Algo
79FIT_SIGN_ALG ?= "rsa2048"
80UBOOT_FIT_SIGN_ALG ?= "rsa2048"
81
82# Kernel / U-Boot fitImage Padding Algo
83FIT_PAD_ALG ?= "pkcs-1.5"
84
85# Generate keys for signing Kernel / U-Boot fitImage
86FIT_GENERATE_KEYS ?= "0"
87UBOOT_FIT_GENERATE_KEYS ?= "0"
88
89# Size of private keys in number of bits
90FIT_SIGN_NUMBITS ?= "2048"
91UBOOT_FIT_SIGN_NUMBITS ?= "2048"
92
93# args to openssl genrsa (Default is just the public exponent)
94FIT_KEY_GENRSA_ARGS ?= "-F4"
95UBOOT_FIT_KEY_GENRSA_ARGS ?= "-F4"
96
97# args to openssl req (Default is -batch for non interactive mode and
98# -new for new certificate)
99FIT_KEY_REQ_ARGS ?= "-batch -new"
100UBOOT_FIT_KEY_REQ_ARGS ?= "-batch -new"
101
102# Standard format for public key certificate
103FIT_KEY_SIGN_PKCS ?= "-x509"
104UBOOT_FIT_KEY_SIGN_PKCS ?= "-x509"
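# For example (an illustrative sketch only; the key directory and key name
# are hypothetical), signed U-Boot fitImage generation with freshly
# generated keys might be enabled with:
#
#   UBOOT_FITIMAGE_ENABLE = "1"
#   SPL_SIGN_ENABLE = "1"
#   SPL_SIGN_KEYDIR = "${TOPDIR}/keys"
#   SPL_SIGN_KEYNAME = "dev"
#   UBOOT_FIT_GENERATE_KEYS = "1"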
105
106# Functions in this bbclass can apply to either U-Boot or the kernel,
107# depending on the scenario
108UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
109KERNEL_PN = "${@d.getVar('PREFERRED_PROVIDER_virtual/kernel')}"
110
111# We need u-boot-tools-native if we're creating a U-Boot fitImage
112python() {
113 if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1':
114 depends = d.getVar("DEPENDS")
115 depends = "%s u-boot-tools-native dtc-native" % depends
116 d.setVar("DEPENDS", depends)
117}
118
119concat_dtb_helper() {
120 if [ -e "${UBOOT_DTB_BINARY}" ]; then
121 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
122 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
123 fi
124
125 if [ -f "${UBOOT_NODTB_BINARY}" ]; then
126 install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
127 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
128 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
129 fi
130
131 # If we're not using a signed u-boot fit, concatenate SPL w/o DTB & U-Boot DTB
132 # with public key (otherwise it will be deployed by the equivalent
133 # concat_spl_dtb_helper function - cf. kernel-fitimage.bbclass for more details)
134 if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then
135 deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
136 if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
137 [ -e "$deployed_uboot_dtb_binary" ]; then
138 oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
139 install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
140 elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
141 cd ${DEPLOYDIR}
142 cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
143
144 if [ -n "${UBOOT_CONFIG}" ]
145 then
146 i=0
147 j=0
148 for config in ${UBOOT_MACHINE}; do
149 i=$(expr $i + 1);
150 for type in ${UBOOT_CONFIG}; do
151 j=$(expr $j + 1);
152 if [ $j -eq $i ]
153 then
154 cp ${UBOOT_IMAGE} ${B}/${CONFIG_B_PATH}/u-boot-$type.${UBOOT_SUFFIX}
155 fi
156 done
157 done
158 fi
159 else
160 bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
161 fi
162 fi
163}
164
165concat_spl_dtb_helper() {
166
167 # We only deploy symlinks to u-boot-spl.dtb, as the KERNEL_PN will
168 # be responsible for deploying the real file
169 if [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then
170 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK}
171 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY}
172 fi
173
174 # Concatenate the SPL nodtb binary and u-boot.dtb
175 deployed_spl_dtb_binary='${DEPLOY_DIR_IMAGE}/${SPL_DTB_IMAGE}'
176 if [ -e "${DEPLOYDIR}/${SPL_NODTB_IMAGE}" -a -e "$deployed_spl_dtb_binary" ] ; then
177 cd ${DEPLOYDIR}
178 cat ${SPL_NODTB_IMAGE} $deployed_spl_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${SPL_BINARY} > ${SPL_IMAGE}
179 else
180 bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available."
181 fi
182}
183
184
185concat_dtb() {
186 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
187 mkdir -p ${DEPLOYDIR}
188 if [ -n "${UBOOT_CONFIG}" ]; then
189 for config in ${UBOOT_MACHINE}; do
190 CONFIG_B_PATH="$config"
191 cd ${B}/$config
192 concat_dtb_helper
193 done
194 else
195 CONFIG_B_PATH=""
196 cd ${B}
197 concat_dtb_helper
198 fi
199 fi
200}
201
202concat_spl_dtb() {
203 if [ "${SPL_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${SPL_DTB_BINARY}" ]; then
204 mkdir -p ${DEPLOYDIR}
205 if [ -n "${UBOOT_CONFIG}" ]; then
206 for config in ${UBOOT_MACHINE}; do
207 CONFIG_B_PATH="$config"
208 cd ${B}/$config
209 concat_spl_dtb_helper
210 done
211 else
212 CONFIG_B_PATH=""
213 cd ${B}
214 concat_spl_dtb_helper
215 fi
216 fi
217}
218
219
220# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
221# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
222install_helper() {
223 if [ -f "${UBOOT_DTB_BINARY}" ]; then
224 # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
225 # need both of them.
226 install -Dm 0644 ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
227 ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
228 else
229 bbwarn "${UBOOT_DTB_BINARY} not found"
230 fi
231}
232
233# Install the SPL dtb and u-boot nodtb to datadir.
234install_spl_helper() {
235 if [ -f "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then
236 install -Dm 0644 ${SPL_DIR}/${SPL_DTB_BINARY} ${D}${datadir}/${SPL_DTB_IMAGE}
237 ln -sf ${SPL_DTB_IMAGE} ${D}${datadir}/${SPL_DTB_BINARY}
238 else
239 bbwarn "${SPL_DTB_BINARY} not found"
240 fi
241 if [ -f "${UBOOT_NODTB_BINARY}" ] ; then
242 install -Dm 0644 ${UBOOT_NODTB_BINARY} ${D}${datadir}/${UBOOT_NODTB_IMAGE}
243 ln -sf ${UBOOT_NODTB_IMAGE} ${D}${datadir}/${UBOOT_NODTB_BINARY}
244 else
245 bbwarn "${UBOOT_NODTB_BINARY} not found"
246 fi
247
248 # We need to install a 'stub' u-boot-fitimage + its to datadir,
249 # so that the KERNEL_PN can use the correct filename when
250 # assembling and deploying them
251 touch ${D}/${datadir}/${UBOOT_FITIMAGE_IMAGE}
252 touch ${D}/${datadir}/${UBOOT_ITS_IMAGE}
253}
254
255do_install:append() {
256 if [ "${PN}" = "${UBOOT_PN}" ]; then
257 if [ -n "${UBOOT_CONFIG}" ]; then
258 for config in ${UBOOT_MACHINE}; do
259 cd ${B}/$config
260 if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
261 [ -n "${UBOOT_DTB_BINARY}" ]; then
262 install_helper
263 fi
264 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
265 install_spl_helper
266 fi
267 done
268 else
269 cd ${B}
270 if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
271 [ -n "${UBOOT_DTB_BINARY}" ]; then
272 install_helper
273 fi
274 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
275 install_spl_helper
276 fi
277 fi
278 fi
279}
280
281do_uboot_generate_rsa_keys() {
282 if [ "${SPL_SIGN_ENABLE}" = "0" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
283 bbwarn "UBOOT_FIT_GENERATE_KEYS is set to 1 eventhough SPL_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
284 fi
285
286 if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
287
288 # Generate keys only if they don't already exist
289 if [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key ] || \
290 [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt ]; then
291
292 # make directory if it does not already exist
293 mkdir -p "${SPL_SIGN_KEYDIR}"
294
295 echo "Generating RSA private key for signing U-Boot fitImage"
296 openssl genrsa ${UBOOT_FIT_KEY_GENRSA_ARGS} -out \
297 "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
298 "${UBOOT_FIT_SIGN_NUMBITS}"
299
300 echo "Generating certificate for signing U-Boot fitImage"
301 openssl req ${FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
302 -key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
303 -out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt
304 fi
305 fi
306
307}
308
309addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compile
310
311# Create an ITS file for the U-Boot FIT, for use when
312# we want to sign it so that the SPL can verify it
313uboot_fitimage_assemble() {
314 uboot_its="$1"
315 uboot_nodtb_bin="$2"
316 uboot_dtb="$3"
317 uboot_bin="$4"
318 spl_dtb="$5"
319 uboot_csum="${UBOOT_FIT_HASH_ALG}"
320 uboot_sign_algo="${UBOOT_FIT_SIGN_ALG}"
321 uboot_sign_keyname="${SPL_SIGN_KEYNAME}"
322
323 rm -f $uboot_its $uboot_bin
324
325 # First we create the ITS script
326 cat << EOF >> $uboot_its
327/dts-v1/;
328
329/ {
330 description = "${UBOOT_FIT_DESC}";
331 #address-cells = <1>;
332
333 images {
334 uboot {
335 description = "U-Boot image";
336 data = /incbin/("$uboot_nodtb_bin");
337 type = "standalone";
338 os = "u-boot";
339 arch = "${UBOOT_ARCH}";
340 compression = "none";
341 load = <${UBOOT_LOADADDRESS}>;
342 entry = <${UBOOT_ENTRYPOINT}>;
343EOF
344
345 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
346 cat << EOF >> $uboot_its
347 signature {
348 algo = "$uboot_csum,$uboot_sign_algo";
349 key-name-hint = "$uboot_sign_keyname";
350 };
351EOF
352 fi
353
354 cat << EOF >> $uboot_its
355 };
356 fdt {
357 description = "U-Boot FDT";
358 data = /incbin/("$uboot_dtb");
359 type = "flat_dt";
360 arch = "${UBOOT_ARCH}";
361 compression = "none";
362EOF
363
364 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
365 cat << EOF >> $uboot_its
366 signature {
367 algo = "$uboot_csum,$uboot_sign_algo";
368 key-name-hint = "$uboot_sign_keyname";
369 };
370EOF
371 fi
372
373 cat << EOF >> $uboot_its
374 };
375 };
376
377 configurations {
378 default = "conf";
379 conf {
380 description = "Boot with signed U-Boot FIT";
381 loadables = "uboot";
382 fdt = "fdt";
383 };
384 };
385};
386EOF
387
388 #
389 # Assemble the U-boot FIT image
390 #
391 ${UBOOT_MKIMAGE} \
392 ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
393 -f $uboot_its \
394 $uboot_bin
395
396 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
397 #
398 # Sign the U-boot FIT image and add public key to SPL dtb
399 #
400 ${UBOOT_MKIMAGE_SIGN} \
401 ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
402 -F -k "${SPL_SIGN_KEYDIR}" \
403 -K "$spl_dtb" \
404 -r $uboot_bin \
405 ${SPL_MKIMAGE_SIGN_ARGS}
406 fi
407
408}
409
410do_uboot_assemble_fitimage() {
411 # This function runs in KERNEL_PN context. The reason for that is that we need to
412 # support the scenario where UBOOT_SIGN_ENABLE is placing the Kernel fitImage's
413 # pubkey in the u-boot.dtb file, so that we can use it when building the U-Boot
414 # fitImage itself.
415 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
416 [ -n "${SPL_DTB_BINARY}" -a "${PN}" = "${KERNEL_PN}" ] ; then
417 if [ "${UBOOT_SIGN_ENABLE}" != "1" ]; then
418 # If we're not signing the Kernel fitImage, that means
419 # we need to copy the u-boot.dtb from staging ourselves
420 cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
421 fi
422 # As we are in the kernel context, we need to copy u-boot-spl.dtb from staging first.
423 # Unfortunately, we need to glob on top of ${SPL_DTB_BINARY} since _IMAGE and _SYMLINK
424 # will contain U-boot's PV
425 # Similarly, we need to get the filename for the 'stub' u-boot-fitimage + its in
426 # staging so that we can use it for creating the image with the correct filename
427 # in the KERNEL_PN context.
428 # As for the u-boot.dtb (with fitimage's pubkey), it should come from the dependent
429 # do_assemble_fitimage task
430 cp -P ${STAGING_DATADIR}/u-boot-spl*.dtb ${B}
431 cp -P ${STAGING_DATADIR}/u-boot-nodtb*.bin ${B}
432 rm -rf ${B}/u-boot-fitImage-* ${B}/u-boot-its-*
433 kernel_uboot_fitimage_name=`basename ${STAGING_DATADIR}/u-boot-fitImage-*`
434 kernel_uboot_its_name=`basename ${STAGING_DATADIR}/u-boot-its-*`
435 cd ${B}
436 uboot_fitimage_assemble $kernel_uboot_its_name ${UBOOT_NODTB_BINARY} \
437 ${UBOOT_DTB_BINARY} $kernel_uboot_fitimage_name \
438 ${SPL_DTB_BINARY}
439 fi
440}
441
442addtask uboot_assemble_fitimage before do_deploy after do_compile
443
444do_deploy:prepend:pn-${UBOOT_PN}() {
445 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
446 concat_dtb
447 fi
448
449 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
450 # Deploy the u-boot-nodtb binary and symlinks...
451 if [ -f "${SPL_DIR}/${SPL_NODTB_BINARY}" ] ; then
452 echo "Copying u-boot-nodtb binary..."
453 install -m 0644 ${SPL_DIR}/${SPL_NODTB_BINARY} ${DEPLOYDIR}/${SPL_NODTB_IMAGE}
454 ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK}
455 ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_BINARY}
456 fi
457
458
459 # We only deploy the symlinks to the uboot-fitImage and uboot-its
460 # images, as the KERNEL_PN will take care of deploying the real file
461 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_BINARY}
462 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK}
463 ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS}
464 ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS_SYMLINK}
465 fi
466
467 if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then
468 concat_spl_dtb
469 fi
470
471
472}
473
474do_deploy:append:pn-${UBOOT_PN}() {
475 # If we're creating a u-boot fitImage, point the u-boot.bin
476 # symlink at it, since it might get used by image recipes
477 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
478 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_BINARY}
479 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_SYMLINK}
480 fi
481}
482
483python () {
484 if ( (d.getVar('UBOOT_SIGN_ENABLE') == '1'
485 or d.getVar('UBOOT_FITIMAGE_ENABLE') == '1')
486 and d.getVar('PN') == d.getVar('UBOOT_PN')
487 and d.getVar('UBOOT_DTB_BINARY')):
488
489 # Make "bitbake u-boot -cdeploy" deploys the signed u-boot.dtb
490 # and/or the U-Boot fitImage
491 d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % d.getVar('KERNEL_PN'))
492
493 if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' and d.getVar('PN') == d.getVar('KERNEL_PN'):
494 # As the U-Boot fitImage is created by the KERNEL_PN, we need
495 # to make sure that the u-boot-spl.dtb and u-boot-spl-nodtb.bin
496 # files are in the staging dir for its use
497 d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % d.getVar('UBOOT_PN'))
498
499 # If the Kernel fitImage is being signed, we need to
500 # create the U-Boot fitImage after it
501 if d.getVar('UBOOT_SIGN_ENABLE') == '1':
502 d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage' % d.getVar('KERNEL_PN'))
503 d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage_initramfs' % d.getVar('KERNEL_PN'))
504
505}
diff --git a/meta/classes-recipe/update-alternatives.bbclass b/meta/classes-recipe/update-alternatives.bbclass
new file mode 100644
index 0000000000..970d9bcd45
--- /dev/null
+++ b/meta/classes-recipe/update-alternatives.bbclass
@@ -0,0 +1,333 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is used to help the alternatives system, which is useful when
8# multiple sources provide the same command. You can use the update-alternatives
9# command directly in your recipe, but in most cases this class simplifies
10# that job.
11#
12# To use this class a number of variables should be defined:
13#
14# List all of the alternatives needed by a package:
15# ALTERNATIVE:<pkg> = "name1 name2 name3 ..."
16#
17# i.e. ALTERNATIVE:busybox = "sh sed test bracket"
18#
19# The pathname of the link
20# ALTERNATIVE_LINK_NAME[name] = "target"
21#
22# This is the name of the binary once it has been installed on the target.
23# This name is global to all split packages in this recipe, and should match
24# other recipes with the same functionality.
25# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
26#
27# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
28#
29# The default link to create for all targets
30# ALTERNATIVE_TARGET = "target"
31#
32# This is useful in a multicall binary case
33# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
34#
35# A non-default link to create for a target
36# ALTERNATIVE_TARGET[name] = "target"
37#
38# This is the name of the binary as it has been installed by do_install
39# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
40#
41# A package specific link for a target
42# ALTERNATIVE_TARGET_<pkg>[name] = "target"
43#
44# This is useful when a recipe provides multiple alternatives for the
45# same item.
46#
47# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
48# from ALTERNATIVE_LINK_NAME.
49#
50# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
51# ALTERNATIVE_TARGET will have '.{BPN}' appended to it. If the file
52# referenced has not been renamed, it will also be renamed. (This avoids
53# the need to rename alternative files in the do_install step, but still
54# supports it if necessary for some reason.)
55#
56# The default priority for any alternatives
57# ALTERNATIVE_PRIORITY = "priority"
58#
59# i.e. default is ALTERNATIVE_PRIORITY = "10"
60#
61# The non-default priority for a specific target
62# ALTERNATIVE_PRIORITY[name] = "priority"
63#
64# The package priority for a specific target
65# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
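#
# For example (an illustrative sketch only; the values are hypothetical),
# a recipe providing a multicall binary could use:
#
#   ALTERNATIVE:${PN} = "sh"
#   ALTERNATIVE_LINK_NAME[sh] = "${base_bindir}/sh"
#   ALTERNATIVE_TARGET[sh] = "${base_bindir}/busybox"
#   ALTERNATIVE_PRIORITY = "50"
#
# This installs /bin/sh as an update-alternatives managed link pointing at
# /bin/busybox with priority 50.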
66
67ALTERNATIVE_PRIORITY = "10"
68
69# We need special processing for vardeps because it cannot work on
70# modified flag values. So we aggregate the flags into a new variable
71# and include that variable in the set.
72UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
73
74PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
75
76def gen_updatealternativesvardeps(d):
77 pkgs = (d.getVar("PACKAGES") or "").split()
78 vars = (d.getVar("UPDALTVARS") or "").split()
79
80 # First compute them for non_pkg versions
81 for v in vars:
82 for flag in sorted((d.getVarFlags(v) or {}).keys()):
83 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
84 continue
85 d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
86
87 for p in pkgs:
88 for v in vars:
89 for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
90 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
91 continue
92 d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
93
94def ua_extend_depends(d):
95 if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
96 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
97
98def update_alternatives_enabled(d):
99 # Update Alternatives only works on target packages...
100 if bb.data.inherits_class('native', d) or \
101 bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
102 bb.data.inherits_class('cross-canadian', d):
103 return False
104
105 # Disable when targeting mingw32 (no target support)
106 if d.getVar("TARGET_OS") == "mingw32":
107 return False
108
109 return True
110
111python __anonymous() {
112 if not update_alternatives_enabled(d):
113 return
114
115 # compute special vardeps
116 gen_updatealternativesvardeps(d)
117
118 # extend the depends to include virtual/update-alternatives
119 ua_extend_depends(d)
120}
121
122def gen_updatealternativesvars(d):
123 ret = []
124 pkgs = (d.getVar("PACKAGES") or "").split()
125 vars = (d.getVar("UPDALTVARS") or "").split()
126
127 for v in vars:
128 ret.append(v + "_VARDEPS")
129
130 for p in pkgs:
131 for v in vars:
132 ret.append(v + ":" + p)
133 ret.append(v + "_VARDEPS_" + p)
134 return " ".join(ret)
135
136# Now the new stuff: we use a custom function to generate the right values
137populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
138
139# We need to do the rename after the image creation step, but before
140# the split and strip steps. PACKAGE_PREPROCESS_FUNCS is the right
141# place for that.
142PACKAGE_PREPROCESS_FUNCS += "apply_update_alternative_renames"
143python apply_update_alternative_renames () {
144 if not update_alternatives_enabled(d):
145 return
146
147 import re
148
149 def update_files(alt_target, alt_target_rename, pkg, d):
150 f = d.getVar('FILES:' + pkg)
151 if f:
152 f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
153 d.setVar('FILES:' + pkg, f)
154
155 # Check for deprecated usage...
156 pn = d.getVar('BPN')
157 if d.getVar('ALTERNATIVE_LINKS') != None:
158 bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
159
160 # Do actual update alternatives processing
161 pkgdest = d.getVar('PKGD')
162 for pkg in (d.getVar('PACKAGES') or "").split():
163 # If the src == dest, we know we need to rename the dest by appending ${BPN}
164 link_rename = []
165 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
166 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
167 if not alt_link:
168 alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
169 d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
170 if alt_link.startswith(os.path.join(d.getVar('sysconfdir'), 'init.d')):
171 # Managing init scripts does not work (bug #10433), foremost
172 # because of a race with update-rc.d
173 bb.fatal("Using update-alternatives for managing SysV init scripts is not supported")
174
175 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
176 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
177 # Sometimes alt_target is specified as relative to the link name.
178 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
179
180 # If the link and target are the same name, we need to rename the target.
181 if alt_link == alt_target:
182 src = '%s/%s' % (pkgdest, alt_target)
183 alt_target_rename = '%s.%s' % (alt_target, pn)
184 dest = '%s/%s' % (pkgdest, alt_target_rename)
185 if os.path.lexists(dest):
186 bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
187 elif os.path.lexists(src):
188 if os.path.islink(src):
189 # Delay rename of links
190 link_rename.append((alt_target, alt_target_rename))
191 else:
192 bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
193 bb.utils.rename(src, dest)
194 update_files(alt_target, alt_target_rename, pkg, d)
195 else:
196 bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
197 continue
198 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
199
200 # Process delayed link names
201 # Do these after other renames so we can correct broken links
202 for (alt_target, alt_target_rename) in link_rename:
203 src = '%s/%s' % (pkgdest, alt_target)
204 dest = '%s/%s' % (pkgdest, alt_target_rename)
205 link_target = oe.path.realpath(src, pkgdest, True)
206
207 if os.path.lexists(link_target):
208 # Ok, the link_target exists, we can rename
209 bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
210 bb.utils.rename(src, dest)
211 else:
212 # Try to resolve the broken link to link.${BPN}
213 link_maybe = '%s.%s' % (os.readlink(src), pn)
214 if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
215 # Ok, the renamed link target exists.. create a new link, and remove the original
216 bb.note('%s: Creating new link %s -> %s' % (pn, alt_target_rename, link_maybe))
217 os.symlink(link_maybe, dest)
218 os.unlink(src)
219 else:
220 bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
221 continue
222 update_files(alt_target, alt_target_rename, pkg, d)
223}
224
225def update_alternatives_alt_targets(d, pkg):
226 """
227 Returns the update-alternatives metadata for a package.
228
229 The returned format is a list of tuples where the tuple contains:
230 alt_name: The binary name
231 alt_link: The path for the binary (Shared by different packages)
232 alt_target: The path for the renamed binary (Unique per package)
233 alt_priority: The priority of the alt_target
234
235 All the alt_targets will be installed into the sysroot. The alt_link is
236 a symlink pointing to the alt_target with the highest priority.
237 """
238
239 pn = d.getVar('BPN')
240 pkgdest = d.getVar('PKGD')
241 updates = list()
242 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
243 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
244 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
245 d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
246 d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
247 d.getVar('ALTERNATIVE_TARGET') or \
248 alt_link
249 alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \
250 d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \
251 d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \
252 d.getVar('ALTERNATIVE_PRIORITY')
253
254 # This shouldn't trigger, as it should have been resolved earlier!
255 if alt_link == alt_target:
256 bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
257 alt_target = '%s.%s' % (alt_target, pn)
258
259 if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
260 bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
261 continue
262
263 alt_target = os.path.normpath(alt_target)
264 updates.append( (alt_name, alt_link, alt_target, alt_priority) )
265
266 return updates
267
268PACKAGESPLITFUNCS:prepend = "populate_packages_updatealternatives "
269
270python populate_packages_updatealternatives () {
271 if not update_alternatives_enabled(d):
272 return
273
274 # Do actual update alternatives processing
275 for pkg in (d.getVar('PACKAGES') or "").split():
276 # Create post install/removal scripts
277 alt_setup_links = ""
278 alt_remove_links = ""
279 updates = update_alternatives_alt_targets(d, pkg)
280 for alt_name, alt_link, alt_target, alt_priority in updates:
281 alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
282 alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
283
284 if alt_setup_links:
285 # RDEPENDS setup
286 provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
287 if provider:
288 #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
289 d.appendVar('RDEPENDS:%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
290
291 bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
292 bb.note('%s' % alt_setup_links)
293 postinst = d.getVar('pkg_postinst:%s' % pkg)
294 if postinst:
295 postinst = alt_setup_links + postinst
296 else:
297 postinst = '#!/bin/sh\n' + alt_setup_links
298 d.setVar('pkg_postinst:%s' % pkg, postinst)
299
300 bb.note('%s' % alt_remove_links)
301 prerm = d.getVar('pkg_prerm:%s' % pkg) or '#!/bin/sh\n'
302 prerm += alt_remove_links
303 d.setVar('pkg_prerm:%s' % pkg, prerm)
304}
305
306python package_do_filedeps:append () {
307 if update_alternatives_enabled(d):
308 apply_update_alternative_provides(d)
309}
310
311def apply_update_alternative_provides(d):
312 pn = d.getVar('BPN')
313 pkgdest = d.getVar('PKGDEST')
314
315 for pkg in d.getVar('PACKAGES').split():
316 for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
317 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
318 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
319 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
320
321 if alt_link == alt_target:
322 bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
323 alt_target = '%s.%s' % (alt_target, pn)
324
325 if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
326 continue
327
328 # Add file provide
329 trans_target = oe.package.file_translate(alt_target)
330 d.appendVar('FILERPROVIDES:%s:%s' % (trans_target, pkg), " " + alt_link)
331 if not trans_target in (d.getVar('FILERPROVIDESFLIST:%s' % pkg) or ""):
332 d.appendVar('FILERPROVIDESFLIST:%s' % pkg, " " + trans_target)
333
diff --git a/meta/classes-recipe/update-rc.d.bbclass b/meta/classes-recipe/update-rc.d.bbclass
new file mode 100644
index 0000000000..cb2aaba57c
--- /dev/null
+++ b/meta/classes-recipe/update-rc.d.bbclass
@@ -0,0 +1,129 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7UPDATERCPN ?= "${PN}"
8
9DEPENDS:append:class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
10
11UPDATERCD = "update-rc.d"
12UPDATERCD:class-cross = ""
13UPDATERCD:class-native = ""
14UPDATERCD:class-nativesdk = ""
15
16INITSCRIPT_PARAMS ?= "defaults"
17
18INIT_D_DIR = "${sysconfdir}/init.d"
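# For example (an illustrative sketch only; the script name is hypothetical),
# a recipe shipping a SysV init script typically sets:
#
#   INITSCRIPT_NAME = "myservice"
#   INITSCRIPT_PARAMS = "start 99 5 2 . stop 20 0 1 6 ."
#
# and installs ${D}${INIT_D_DIR}/myservice in do_install.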
19
20def use_updatercd(d):
21 # If the distro supports both sysvinit and systemd, and the current recipe
22 # supports systemd, only call update-rc.d on rootfs creation or if systemd
23 # is not running. That's because systemctl enable/disable will already call
24 # update-rc.d if it detects initscripts.
25 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and bb.data.inherits_class('systemd', d):
26 return '[ -n "$D" -o ! -d /run/systemd/system ]'
27 return 'true'
28
29PACKAGE_WRITE_DEPS += "update-rc.d-native"
30
31updatercd_postinst() {
32if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
33 if [ -n "$D" ]; then
34 OPT="-r $D"
35 else
36 OPT="-s"
37 fi
38 update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
39fi
40}
41
42updatercd_prerm() {
43if ${@use_updatercd(d)} && [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
44 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
45fi
46}
47
48updatercd_postrm() {
49if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
50 if [ -n "$D" ]; then
51 OPT="-f -r $D"
52 else
53 OPT="-f"
54 fi
55 update-rc.d $OPT ${INITSCRIPT_NAME} remove
56fi
57}
58
59
60def update_rc_after_parse(d):
61 if d.getVar('INITSCRIPT_PACKAGES', False) == None:
62 if d.getVar('INITSCRIPT_NAME', False) == None:
63 bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
64 if d.getVar('INITSCRIPT_PARAMS', False) == None:
65 bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
66
67python __anonymous() {
68 update_rc_after_parse(d)
69}
70
71PACKAGESPLITFUNCS:prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
72PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd "
73
74populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
75populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
76
77python populate_packages_updatercd () {
78 def update_rcd_auto_depend(pkg):
79 import subprocess
80 import os
81 path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}")
82 if not os.path.exists(path):
83 return
84 statement = "grep -q -w '/etc/init.d/functions' %s" % path
85 if subprocess.call(statement, shell=True) == 0:
86 mlprefix = d.getVar('MLPREFIX') or ""
87 d.appendVar('RDEPENDS:' + pkg, ' %sinitd-functions' % (mlprefix))
88
89 def update_rcd_package(pkg):
90 bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
91
92 localdata = bb.data.createCopy(d)
93 overrides = localdata.getVar("OVERRIDES")
94 localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
95
96 update_rcd_auto_depend(pkg)
97
98 postinst = d.getVar('pkg_postinst:%s' % pkg)
99 if not postinst:
100 postinst = '#!/bin/sh\n'
101 postinst += localdata.getVar('updatercd_postinst')
102 d.setVar('pkg_postinst:%s' % pkg, postinst)
103
104 prerm = d.getVar('pkg_prerm:%s' % pkg)
105 if not prerm:
106 prerm = '#!/bin/sh\n'
107 prerm += localdata.getVar('updatercd_prerm')
108 d.setVar('pkg_prerm:%s' % pkg, prerm)
109
110 postrm = d.getVar('pkg_postrm:%s' % pkg)
111 if not postrm:
112 postrm = '#!/bin/sh\n'
113 postrm += localdata.getVar('updatercd_postrm')
114 d.setVar('pkg_postrm:%s' % pkg, postrm)
115
116 d.appendVar('RRECOMMENDS:' + pkg, " ${MLPREFIX}${UPDATERCD}")
117
118 # Check that this class isn't being inhibited (generally, by
119 # systemd.bbclass) before doing any work.
120 if not d.getVar("INHIBIT_UPDATERCD_BBCLASS"):
121 pkgs = d.getVar('INITSCRIPT_PACKAGES')
122 if pkgs == None:
123 pkgs = d.getVar('UPDATERCPN')
124 packages = (d.getVar('PACKAGES') or "").split()
125 if not pkgs in packages and packages != []:
126 pkgs = packages[0]
127 for pkg in pkgs.split():
128 update_rcd_package(pkg)
129}
diff --git a/meta/classes-recipe/upstream-version-is-even.bbclass b/meta/classes-recipe/upstream-version-is-even.bbclass
new file mode 100644
index 0000000000..19587cb12c
--- /dev/null
+++ b/meta/classes-recipe/upstream-version-is-even.bbclass
@@ -0,0 +1,11 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class ensures that the upstream version check only
8# accepts even minor versions (e.g. 3.0.x, 3.2.x, 3.4.x, etc.).
9# This scheme is used by GNOME and a number of other projects
10# to signify stable releases vs development releases.
11UPSTREAM_CHECK_REGEX = "[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar"
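#
# Illustrative examples only (hypothetical tarball names): with this regex,
# "foo-3.2.1.tar.gz" matches (the minor version 2 is even, pver = "3.2.1"),
# while "foo-3.3.0.tar.gz" does not (the minor version 3 is odd).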
diff --git a/meta/classes-recipe/vala.bbclass b/meta/classes-recipe/vala.bbclass
new file mode 100644
index 0000000000..460ddb36f0
--- /dev/null
+++ b/meta/classes-recipe/vala.bbclass
@@ -0,0 +1,30 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Everyone needs vala-native and targets need vala, too,
8# because that is where target builds look for .vapi files.
9#
10VALADEPENDS = ""
11VALADEPENDS:class-target = "vala"
12DEPENDS:append = " vala-native ${VALADEPENDS}"
13
14# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
15export STAGING_DATADIR
16# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
17export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
18
19# Package additional files
20FILES:${PN}-dev += "\
21 ${datadir}/vala/vapi/*.vapi \
22 ${datadir}/vala/vapi/*.deps \
23 ${datadir}/gir-1.0 \
24"
25
26# Remove vapigen.m4 that is bundled with tarballs
27# because it does not yet have our cross-compile fixes
28do_configure:prepend() {
29 rm -f ${S}/m4/vapigen.m4
30}
diff --git a/meta/classes-recipe/waf.bbclass b/meta/classes-recipe/waf.bbclass
new file mode 100644
index 0000000000..5fa0cc4987
--- /dev/null
+++ b/meta/classes-recipe/waf.bbclass
@@ -0,0 +1,81 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# avoids build breaks when using no-static-libs.inc
8DISABLE_STATIC = ""
9
10# Which Python interpreter to use. Defaults to Python 3 but can be
11# overridden if required.
12WAF_PYTHON ?= "python3"
13
14B = "${WORKDIR}/build"
15do_configure[cleandirs] += "${B}"
16
17EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
18
19EXTRA_OEWAF_BUILD ??= ""
20# In most cases, you want to pass the same arguments to `waf build` and `waf
21# install`, but you can override it if necessary
22EXTRA_OEWAF_INSTALL ??= "${EXTRA_OEWAF_BUILD}"
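#
# For example (an illustrative sketch only; --verbose is a generic waf
# option, not something this class sets):
#
#   EXTRA_OEWAF_BUILD = "--verbose"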
23
24def waflock_hash(d):
25 # Calculates the hash used for the waf lock file. This should include
26 # all of the user controllable inputs passed to waf configure. Note
27 # that the full paths for ${B} and ${S} are used; this is OK and desired
28 # because a change to either of these should create a unique lock file
29 # to prevent collisions.
30 import hashlib
31 h = hashlib.sha512()
32 def update(name):
33 val = d.getVar(name)
34 if val is not None:
35 h.update(val.encode('utf-8'))
36 update('S')
37 update('B')
38 update('prefix')
39 update('EXTRA_OECONF')
40 return h.hexdigest()
41
42# Use WAFLOCK to specify a separate lock file. The build is already
43# sufficiently isolated by setting the output directory; this ensures that
44# bitbake won't step on the toes of any other configured context in the
45# source directory (e.g. if the source is coming from externalsrc and was
46# previously configured elsewhere).
47export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
48BB_BASEHASH_IGNORE_VARS += "WAFLOCK"
49
50python waf_preconfigure() {
51 import subprocess
52 subsrcdir = d.getVar('S')
53 python = d.getVar('WAF_PYTHON')
54 wafbin = os.path.join(subsrcdir, 'waf')
55 try:
56 result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
57 version = result.decode('utf-8').split()[1]
58 if bb.utils.vercmp_string_op(version, "1.8.7", ">="):
59 d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
60 except subprocess.CalledProcessError as e:
61 bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
62 except FileNotFoundError:
63 bb.fatal("waf does not exist in %s" % subsrcdir)
64}
65
66do_configure[prefuncs] += "waf_preconfigure"
67
68waf_do_configure() {
69 (cd ${S} && ${WAF_PYTHON} ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
70}
71
72do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
73waf_do_compile() {
74 (cd ${S} && ${WAF_PYTHON} ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)} ${EXTRA_OEWAF_BUILD})
75}
76
77waf_do_install() {
78 (cd ${S} && ${WAF_PYTHON} ./waf install --destdir=${D} ${EXTRA_OEWAF_INSTALL})
79}
80
81EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes-recipe/xmlcatalog.bbclass b/meta/classes-recipe/xmlcatalog.bbclass
new file mode 100644
index 0000000000..5826d0a8b5
--- /dev/null
+++ b/meta/classes-recipe/xmlcatalog.bbclass
@@ -0,0 +1,32 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7DEPENDS = "libxml2-native"
8
9# A whitespace-separated list of XML catalogs to be registered, for example
10# "${sysconfdir}/xml/docbook-xml.xml".
11XMLCATALOGS ?= ""
12
13SYSROOT_PREPROCESS_FUNCS:append = " xmlcatalog_sstate_postinst"
14
15xmlcatalog_complete() {
16 ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
17 if [ ! -f $ROOTCATALOG ]; then
18 mkdir --parents $(dirname $ROOTCATALOG)
19 xmlcatalog --noout --create $ROOTCATALOG
20 fi
21 for CATALOG in ${XMLCATALOGS}; do
22 xmlcatalog --noout --add nextCatalog unused file://$CATALOG $ROOTCATALOG
23 done
24}
25
26xmlcatalog_sstate_postinst() {
27 mkdir -p ${SYSROOT_DESTDIR}${bindir}
28 dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog
29 echo '#!/bin/sh' > $dest
30 echo '${xmlcatalog_complete}' >> $dest
31 chmod 0755 $dest
32}