summaryrefslogtreecommitdiffstats
path: root/meta/classes-recipe
diff options
context:
space:
mode:
Diffstat (limited to 'meta/classes-recipe')
-rw-r--r--meta/classes-recipe/autotools.bbclass62
-rw-r--r--meta/classes-recipe/barebox.bbclass160
-rw-r--r--meta/classes-recipe/baremetal-image.bbclass38
-rw-r--r--meta/classes-recipe/cargo-update-recipe-crates.bbclass2
-rw-r--r--meta/classes-recipe/cargo.bbclass14
-rw-r--r--meta/classes-recipe/cargo_common.bbclass30
-rw-r--r--meta/classes-recipe/cmake-qemu.bbclass2
-rw-r--r--meta/classes-recipe/cmake.bbclass62
-rw-r--r--meta/classes-recipe/cml1.bbclass14
-rw-r--r--meta/classes-recipe/core-image.bbclass11
-rw-r--r--meta/classes-recipe/create-spdx-image-3.0.bbclass85
-rw-r--r--meta/classes-recipe/create-spdx-sdk-3.0.bbclass74
-rw-r--r--meta/classes-recipe/cross-canadian.bbclass2
-rw-r--r--meta/classes-recipe/cross.bbclass22
-rw-r--r--meta/classes-recipe/crosssdk.bbclass1
-rw-r--r--meta/classes-recipe/cython.bbclass8
-rw-r--r--meta/classes-recipe/devicetree.bbclass24
-rw-r--r--meta/classes-recipe/devupstream.bbclass6
-rw-r--r--meta/classes-recipe/features_check.bbclass2
-rw-r--r--meta/classes-recipe/fontcache.bbclass3
-rw-r--r--meta/classes-recipe/gio-module-cache.bbclass3
-rw-r--r--meta/classes-recipe/go-mod-update-modules.bbclass152
-rw-r--r--meta/classes-recipe/go-mod.bbclass10
-rw-r--r--meta/classes-recipe/go.bbclass24
-rw-r--r--meta/classes-recipe/grub-efi-cfg.bbclass8
-rw-r--r--meta/classes-recipe/gtk-icon-cache.bbclass2
-rw-r--r--meta/classes-recipe/gtk-immodules-cache.bbclass4
-rw-r--r--meta/classes-recipe/image-live.bbclass5
-rw-r--r--meta/classes-recipe/image.bbclass120
-rw-r--r--meta/classes-recipe/image_types.bbclass15
-rw-r--r--meta/classes-recipe/image_types_wic.bbclass29
-rw-r--r--meta/classes-recipe/kernel-arch.bbclass8
-rw-r--r--meta/classes-recipe/kernel-fit-extra-artifacts.bbclass19
-rw-r--r--meta/classes-recipe/kernel-fit-image.bbclass189
-rw-r--r--meta/classes-recipe/kernel-fitimage.bbclass881
-rw-r--r--meta/classes-recipe/kernel-module-split.bbclass95
-rw-r--r--meta/classes-recipe/kernel-uboot.bbclass36
-rw-r--r--meta/classes-recipe/kernel-uimage.bbclass3
-rw-r--r--meta/classes-recipe/kernel-yocto.bbclass78
-rw-r--r--meta/classes-recipe/kernel.bbclass52
-rw-r--r--meta/classes-recipe/kernelsrc.bbclass4
-rw-r--r--meta/classes-recipe/license_image.bbclass16
-rw-r--r--meta/classes-recipe/linuxloader.bbclass2
-rw-r--r--meta/classes-recipe/manpages.bbclass2
-rw-r--r--meta/classes-recipe/meson.bbclass28
-rw-r--r--meta/classes-recipe/module.bbclass1
-rw-r--r--meta/classes-recipe/multilib_script.bbclass18
-rw-r--r--meta/classes-recipe/native.bbclass45
-rw-r--r--meta/classes-recipe/nativesdk.bbclass7
-rw-r--r--meta/classes-recipe/nospdx.bbclass13
-rw-r--r--meta/classes-recipe/npm.bbclass7
-rw-r--r--meta/classes-recipe/pixbufcache.bbclass5
-rw-r--r--meta/classes-recipe/populate_sdk_base.bbclass63
-rw-r--r--meta/classes-recipe/populate_sdk_ext.bbclass26
-rw-r--r--meta/classes-recipe/ptest-cargo.bbclass54
-rw-r--r--meta/classes-recipe/ptest-perl.bbclass2
-rw-r--r--meta/classes-recipe/ptest-python-pytest.bbclass37
-rw-r--r--meta/classes-recipe/ptest.bbclass2
-rw-r--r--meta/classes-recipe/pypi.bbclass22
-rw-r--r--meta/classes-recipe/python3-dir.bbclass2
-rw-r--r--meta/classes-recipe/python3native.bbclass6
-rw-r--r--meta/classes-recipe/python_flit_core.bbclass2
-rw-r--r--meta/classes-recipe/python_mesonpy.bbclass4
-rw-r--r--meta/classes-recipe/python_pep517.bbclass2
-rw-r--r--meta/classes-recipe/python_pyo3.bbclass12
-rw-r--r--meta/classes-recipe/qemu.bbclass59
-rw-r--r--meta/classes-recipe/qemuboot.bbclass3
-rw-r--r--meta/classes-recipe/rootfs-postcommands.bbclass53
-rw-r--r--meta/classes-recipe/rust-common.bbclass4
-rw-r--r--meta/classes-recipe/rust-target-config.bbclass100
-rw-r--r--meta/classes-recipe/rust.bbclass8
-rw-r--r--meta/classes-recipe/scons.bbclass4
-rw-r--r--meta/classes-recipe/setuptools3-base.bbclass4
-rw-r--r--meta/classes-recipe/setuptools3.bbclass17
-rw-r--r--meta/classes-recipe/setuptools3_legacy.bbclass3
-rw-r--r--meta/classes-recipe/siteinfo.bbclass16
-rw-r--r--meta/classes-recipe/sourceforge-releases.bbclass2
-rw-r--r--meta/classes-recipe/systemd.bbclass204
-rw-r--r--meta/classes-recipe/testexport.bbclass12
-rw-r--r--meta/classes-recipe/testimage.bbclass23
-rw-r--r--meta/classes-recipe/testsdk.bbclass4
-rw-r--r--meta/classes-recipe/toolchain-scripts.bbclass28
-rw-r--r--meta/classes-recipe/uboot-config.bbclass34
-rw-r--r--meta/classes-recipe/uboot-extlinux-config.bbclass18
-rw-r--r--meta/classes-recipe/uboot-sign.bbclass264
-rw-r--r--meta/classes-recipe/uki.bbclass194
-rw-r--r--meta/classes-recipe/update-alternatives.bbclass34
87 files changed, 2194 insertions, 1631 deletions
diff --git a/meta/classes-recipe/autotools.bbclass b/meta/classes-recipe/autotools.bbclass
index 9359c9b4e1..948f8c183a 100644
--- a/meta/classes-recipe/autotools.bbclass
+++ b/meta/classes-recipe/autotools.bbclass
@@ -36,7 +36,6 @@ inherit siteinfo
36# the contents of the sysroot. 36# the contents of the sysroot.
37export CONFIG_SITE 37export CONFIG_SITE
38 38
39acpaths ?= "default"
40EXTRA_AUTORECONF += "--exclude=autopoint" 39EXTRA_AUTORECONF += "--exclude=autopoint"
41 40
42export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}" 41export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
@@ -52,17 +51,11 @@ export CC_FOR_BUILD = "${BUILD_CC}"
52export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}" 51export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
53 52
54export CXX_FOR_BUILD = "${BUILD_CXX}" 53export CXX_FOR_BUILD = "${BUILD_CXX}"
55export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}" 54export CXXFLAGS_FOR_BUILD = "${BUILD_CXXFLAGS}"
56 55
57export LD_FOR_BUILD = "${BUILD_LD}" 56export LD_FOR_BUILD = "${BUILD_LD}"
58export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}" 57export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
59 58
60def append_libtool_sysroot(d):
61 # Only supply libtool sysroot option for non-native packages
62 if not bb.data.inherits_class('native', d):
63 return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
64 return ""
65
66CONFIGUREOPTS = " --build=${BUILD_SYS} \ 59CONFIGUREOPTS = " --build=${BUILD_SYS} \
67 --host=${HOST_SYS} \ 60 --host=${HOST_SYS} \
68 --target=${TARGET_SYS} \ 61 --target=${TARGET_SYS} \
@@ -81,8 +74,7 @@ CONFIGUREOPTS = " --build=${BUILD_SYS} \
81 --infodir=${infodir} \ 74 --infodir=${infodir} \
82 --mandir=${mandir} \ 75 --mandir=${mandir} \
83 --disable-silent-rules \ 76 --disable-silent-rules \
84 ${CONFIGUREOPT_DEPTRACK} \ 77 ${CONFIGUREOPT_DEPTRACK}"
85 ${@append_libtool_sysroot(d)}"
86CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking" 78CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
87 79
88CACHED_CONFIGUREVARS ?= "" 80CACHED_CONFIGUREVARS ?= ""
@@ -141,17 +133,11 @@ EXTRACONFFUNCS ??= ""
141 133
142EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}" 134EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
143 135
144do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}" 136do_configure[prefuncs] += "autotools_preconfigure autotools_sitefiles ${EXTRACONFFUNCS}"
145do_compile[prefuncs] += "autotools_aclocals"
146do_install[prefuncs] += "autotools_aclocals"
147do_configure[postfuncs] += "autotools_postconfigure" 137do_configure[postfuncs] += "autotools_postconfigure"
148 138
149ACLOCALDIR = "${STAGING_DATADIR}/aclocal" 139# Tell autoconf to load the site defaults from siteinfo
150ACLOCALEXTRAPATH = "" 140python autotools_sitefiles () {
151ACLOCALEXTRAPATH:class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
152ACLOCALEXTRAPATH:class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
153
154python autotools_aclocals () {
155 sitefiles, searched = siteinfo_get_files(d, sysrootcache=True) 141 sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
156 d.setVar("CONFIG_SITE", " ".join(sitefiles)) 142 d.setVar("CONFIG_SITE", " ".join(sitefiles))
157} 143}
@@ -178,28 +164,13 @@ autotools_do_configure() {
178 if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then 164 if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
179 olddir=`pwd` 165 olddir=`pwd`
180 cd ${AUTOTOOLS_SCRIPT_PATH} 166 cd ${AUTOTOOLS_SCRIPT_PATH}
181 mkdir -p ${ACLOCALDIR} 167 # aclocal looks in the native sysroot by default, so tell it to also look in the target sysroot.
182 ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/" 168 ACLOCAL="aclocal --aclocal-path=${STAGING_DATADIR}/aclocal/"
183 if [ x"${acpaths}" = xdefault ]; then
184 acpaths=
185 for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
186 grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
187 acpaths="$acpaths -I $i"
188 done
189 else
190 acpaths="${acpaths}"
191 fi
192 acpaths="$acpaths ${ACLOCALEXTRAPATH}"
193 AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
194 automake --version
195 echo "AUTOV is $AUTOV"
196 if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
197 ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
198 fi
199 # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look 169 # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
200 # like it was auto-generated. Work around this by blowing it away 170 # like it was auto-generated. Work around this by blowing it away
201 # by hand, unless the package specifically asked not to run aclocal. 171 # by hand, unless the package specifically asked not to run aclocal.
202 if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then 172 if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
173 bbnote Removing existing aclocal.m4
203 rm -f aclocal.m4 174 rm -f aclocal.m4
204 fi 175 fi
205 if [ -e configure.in ]; then 176 if [ -e configure.in ]; then
@@ -219,8 +190,8 @@ autotools_do_configure() {
219 cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/ 190 cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
220 if [ -d ${S}/po/ ]; then 191 if [ -d ${S}/po/ ]; then
221 cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/ 192 cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
222 if [ ! -e ${S}/po/remove-potcdate.sin ]; then 193 if [ ! -e ${S}/po/remove-potcdate.sed ]; then
223 cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/ 194 cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sed ${S}/po/
224 fi 195 fi
225 fi 196 fi
226 PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4" 197 PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
@@ -231,15 +202,12 @@ autotools_do_configure() {
231 find ${S} -ignore_readdir_race -name $i -delete 202 find ${S} -ignore_readdir_race -name $i -delete
232 done 203 done
233 204
234 bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths 205 bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF}
235 ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed." 206 ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} || die "autoreconf execution failed."
236 cd $olddir 207 cd $olddir
237 fi 208 fi
238 if [ -e ${CONFIGURE_SCRIPT} ]; then 209
239 oe_runconf 210 oe_runconf
240 else
241 bbnote "nothing to configure"
242 fi
243} 211}
244 212
245autotools_do_compile() { 213autotools_do_compile() {
@@ -254,8 +222,6 @@ autotools_do_install() {
254 fi 222 fi
255} 223}
256 224
257inherit siteconfig
258
259EXPORT_FUNCTIONS do_configure do_compile do_install 225EXPORT_FUNCTIONS do_configure do_compile do_install
260 226
261B = "${WORKDIR}/build" 227B = "${WORKDIR}/build"
diff --git a/meta/classes-recipe/barebox.bbclass b/meta/classes-recipe/barebox.bbclass
new file mode 100644
index 0000000000..ece8fb6485
--- /dev/null
+++ b/meta/classes-recipe/barebox.bbclass
@@ -0,0 +1,160 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-arch deploy cml1 pkgconfig
8
9LICENSE ?= "GPL-2.0-only"
10
11PROVIDES += "virtual/bootloader"
12
13PACKAGE_ARCH = "${MACHINE_ARCH}"
14
15DEPENDS += "bison-native flex-native lz4-native"
16
17S = "${UNPACKDIR}/barebox-${PV}"
18B = "${WORKDIR}/build"
19
20require conf/image-uefi.conf
21
22# For some platforms and configuration, the barebox build process will require
23# additional host tools that can be activated/deactivated here.
24PACKAGECONFIG ??= "openssl libusb fit"
25
26PACKAGECONFIG[openssl] = ",,openssl-native"
27PACKAGECONFIG[libusb] = ",,libusb1-native"
28PACKAGECONFIG[fit] = ",,u-boot-tools-native dtc-native"
29
30export KBUILD_BUILD_USER ?= "oe-user"
31export KBUILD_BUILD_HOST ?= "oe-host"
32
33# unlike the kernel, barebox may build against host tools like openssl
34export HOST_EXTRACFLAGS
35
36def get_layer_rev(path):
37 try:
38 rev, _ = bb.process.run("git describe --match='' --always --dirty --broken", cwd=path)
39 except bb.process.ExecutionError:
40 rev = ""
41 return rev.strip()
42
43BAREBOX_BUILDSYSTEM_VERSION[doc] = "Build system version to add to the barebox image. By default this is the git description of the containing layer."
44BAREBOX_BUILDSYSTEM_VERSION ??= "${@get_layer_rev(os.path.dirname(d.getVar('FILE')))}"
45
46BAREBOX_FIRMWARE_DIR[doc] = "Overwrite barebox' firmware blobs search directory (CONFIG_EXTRA_FIRMWARE_DIR) with this path, default ${B}/firmware"
47BAREBOX_FIRMWARE_DIR ??= "${B}/firmware"
48
49EXTRA_OEMAKE = " \
50 CROSS_COMPILE=${TARGET_PREFIX} -C ${S} O=${B} \
51 BUILDSYSTEM_VERSION=${BAREBOX_BUILDSYSTEM_VERSION} \
52 CONFIG_EXTRA_FIRMWARE_DIR=${BAREBOX_FIRMWARE_DIR} \
53 PKG_CONFIG=pkg-config-native \
54 CROSS_PKG_CONFIG=pkg-config \
55"
56
57BAREBOX_CONFIG[doc] = "The barebox kconfig defconfig file. Not used if a file called defconfig is added to the SRC_URI."
58BAREBOX_CONFIG ?= ""
59
60# set sensible default configs for some of oe-core's QEMU MACHINEs
61BAREBOX_CONFIG:qemuarm = "multi_v7_defconfig"
62BAREBOX_CONFIG:qemuarm64 = "multi_v8_defconfig"
63BAREBOX_CONFIG:qemux86-64 = "efi_defconfig"
64
65# prevent from acting as non-buildable provider
66python () {
67 bareboxconfig = d.getVar('BAREBOX_CONFIG')
68 bareboxdefconfig = 'file://defconfig' in d.getVar('SRC_URI')
69
70 if not bareboxconfig and not bareboxdefconfig:
71 raise bb.parse.SkipRecipe("BAREBOX_CONFIG must be set in the %s machine configuration or file://defconfig must be given in SRC_URI." % d.getVar("MACHINE"))
72}
73
74barebox_do_configure() {
75 if [ -e ${UNPACKDIR}/defconfig ]; then
76 cp ${UNPACKDIR}/defconfig ${B}/.config
77 else
78 if [ -n "${BAREBOX_CONFIG}" ]; then
79 oe_runmake ${BAREBOX_CONFIG}
80 else
81 bbfatal "No defconfig given. Either add file 'file://defconfig' to SRC_URI or set BAREBOX_CONFIG"
82 fi
83 fi
84
85 ${S}/scripts/kconfig/merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
86 cml1_do_configure
87}
88
89BAREBOX_ENV_DIR[doc] = "Overlay the barebox built-in environment with the environment provided by the BSP if specified."
90BAREBOX_ENV_DIR ??= "${UNPACKDIR}/env/"
91
92barebox_do_compile () {
93 export userccflags="${TARGET_LDFLAGS}${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
94 unset LDFLAGS
95 unset CFLAGS
96 unset CPPFLAGS
97 unset CXXFLAGS
98 unset MACHINE
99 # Allow to use ${UNPACKDIR} in kconfig options to include additionally fetched files
100 export UNPACKDIR=${UNPACKDIR}
101
102 if [ -d ${BAREBOX_ENV_DIR} ]; then
103 BAREBOX_DEFAULT_ENV="$(grep ^CONFIG_DEFAULT_ENVIRONMENT_PATH .config | cut -d '=' -f 2 | tr -d '"')"
104 oe_runmake CONFIG_DEFAULT_ENVIRONMENT_PATH="\"${BAREBOX_DEFAULT_ENV} ${BAREBOX_ENV_DIR}\""
105 else
106 oe_runmake
107 fi
108}
109
110BAREBOX_BINARY[doc] = "Specify the barebox binary to install. If not specified all barebox artifacts are installed."
111BAREBOX_BINARY ??= "${@'barebox.efi' if d.getVar('EFI_PROVIDER') == 'barebox' else ''}"
112BAREBOX_SUFFIX[doc] = "Specify the suffix for ${BAREBOX_IMAGE}."
113BAREBOX_SUFFIX ??= "img"
114BAREBOX_IMAGE[doc] = "A unique barebox image name. Unused if ${BAREBOX_BINARY} is not set."
115BAREBOX_IMAGE_DEFAULT ?= "${PN}-${MACHINE}-${PV}-${PR}.${BAREBOX_SUFFIX}"
116BAREBOX_IMAGE ?= "${@'${EFI_BOOT_IMAGE}' if d.getVar('EFI_PROVIDER') == 'barebox' else '${BAREBOX_IMAGE_DEFAULT}'}"
117
118BAREBOX_INSTALL_PATH ?= "${@'${EFI_FILES_PATH}' if d.getVar('EFI_PROVIDER') == 'barebox' else '/boot'}"
119
120barebox_do_install () {
121 if [ -n "${BAREBOX_BINARY}" ]; then
122
123 BAREBOX_BIN=${B}/${BAREBOX_BINARY}
124 if [ ! -f "${BAREBOX_BIN}" ]; then
125 BAREBOX_BIN=${B}/images/${BAREBOX_BINARY}
126 fi
127 if [ ! -f "${BAREBOX_BIN}" ]; then
128 bbfatal "Failed to locate ${BAREBOX_BINARY}"
129 fi
130
131 install -D -m 644 ${BAREBOX_BIN} ${D}${BAREBOX_INSTALL_PATH}/${BAREBOX_IMAGE}
132 ln -sf ${BAREBOX_IMAGE} ${D}${BAREBOX_INSTALL_PATH}/${BAREBOX_BINARY}
133 else
134 install -d ${D}${BAREBOX_INSTALL_PATH}/
135 for image in $(cat ${B}/barebox-flash-images); do
136 install -m 644 ${B}/${image} ${D}${BAREBOX_INSTALL_PATH}/
137 done
138 fi
139}
140FILES:${PN} = "${BAREBOX_INSTALL_PATH}"
141
142barebox_do_deploy () {
143 if [ -n "${BAREBOX_BINARY}" ]; then
144
145 BAREBOX_BIN=${B}/${BAREBOX_BINARY}
146 if [ ! -f "${BAREBOX_BIN}" ]; then
147 BAREBOX_BIN=${B}/images/${BAREBOX_BINARY}
148 fi
149
150 install -D -m 644 ${BAREBOX_BIN} ${DEPLOYDIR}/${BAREBOX_IMAGE}
151 ln -sf ${BAREBOX_IMAGE} ${DEPLOYDIR}/${BAREBOX_BINARY}
152 else
153 for image in $(cat ${B}/barebox-flash-images); do
154 cp ${B}/${image} ${DEPLOYDIR}
155 done
156 fi
157}
158addtask deploy after do_compile
159
160EXPORT_FUNCTIONS do_configure do_compile do_install do_deploy
diff --git a/meta/classes-recipe/baremetal-image.bbclass b/meta/classes-recipe/baremetal-image.bbclass
index b9a584351a..4afc171314 100644
--- a/meta/classes-recipe/baremetal-image.bbclass
+++ b/meta/classes-recipe/baremetal-image.bbclass
@@ -16,8 +16,8 @@
16# See meta-skeleton for a working example. 16# See meta-skeleton for a working example.
17 17
18 18
19# Toolchain should be baremetal or newlib based. 19# Toolchain should be baremetal or newlib/picolibc based.
20# TCLIBC="baremetal" or TCLIBC="newlib" 20# TCLIBC="baremetal" or TCLIBC="newlib" or TCLIBC="picolibc"
21COMPATIBLE_HOST:libc-musl:class-target = "null" 21COMPATIBLE_HOST:libc-musl:class-target = "null"
22COMPATIBLE_HOST:libc-glibc:class-target = "null" 22COMPATIBLE_HOST:libc-glibc:class-target = "null"
23 23
@@ -30,6 +30,9 @@ BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
30IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}" 30IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
31IMAGE_NAME_SUFFIX ?= "" 31IMAGE_NAME_SUFFIX ?= ""
32 32
33IMAGE_OUTPUT_MANIFEST_DIR = "${WORKDIR}/deploy-image-output-manifest"
34IMAGE_OUTPUT_MANIFEST = "${IMAGE_OUTPUT_MANIFEST_DIR}/manifest.json"
35
33do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}" 36do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
34 37
35do_image(){ 38do_image(){
@@ -37,8 +40,28 @@ do_image(){
37 install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf 40 install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
38} 41}
39 42
40do_image_complete(){ 43python do_image_complete(){
41 : 44 from pathlib import Path
45 import json
46
47 data = {
48 "taskname": "do_image",
49 "imagetype": "baremetal-image",
50 "images": []
51 }
52
53 img_deploy_dir = Path(d.getVar("IMGDEPLOYDIR"))
54
55 for child in img_deploy_dir.iterdir():
56 if not child.is_file() or child.is_symlink():
57 continue
58
59 data["images"].append({
60 "filename": child.name,
61 })
62
63 with open(d.getVar("IMAGE_OUTPUT_MANIFEST"), "w") as f:
64 json.dump([data], f)
42} 65}
43 66
44python do_rootfs(){ 67python do_rootfs(){
@@ -62,6 +85,7 @@ python do_rootfs(){
62 bb.utils.mkdirhier(sysconfdir) 85 bb.utils.mkdirhier(sysconfdir)
63 86
64 execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND')) 87 execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
88 execute_pre_post_process(d, d.getVar("ROOTFS_POSTUNINSTALL_COMMAND"))
65} 89}
66 90
67 91
@@ -72,6 +96,8 @@ SSTATE_SKIP_CREATION:task-image-complete = '1'
72do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}" 96do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
73do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" 97do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
74do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}" 98do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
99do_image_complete[sstate-plaindirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}"
100do_image_complete[dirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}"
75addtask do_image_complete after do_image before do_build 101addtask do_image_complete after do_image before do_build
76 102
77python do_image_complete_setscene () { 103python do_image_complete_setscene () {
@@ -103,7 +129,7 @@ QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
103# since medlow can only access addresses below 0x80000000 and RAM 129# since medlow can only access addresses below 0x80000000 and RAM
104# starts at 0x80000000 on RISC-V 64 130# starts at 0x80000000 on RISC-V 64
105# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB) 131# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
106CFLAGS:append:qemuriscv64 = " -mcmodel=medany" 132TARGET_CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
107 133
108 134
109## Emulate image.bbclass 135## Emulate image.bbclass
@@ -140,5 +166,5 @@ python(){
140 else: 166 else:
141 deps += " %s:%s" % (dep, task) 167 deps += " %s:%s" % (dep, task)
142 return deps 168 return deps
143 d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot')) 169 d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
144} 170}
diff --git a/meta/classes-recipe/cargo-update-recipe-crates.bbclass b/meta/classes-recipe/cargo-update-recipe-crates.bbclass
index 8980137d02..3251d5ef2e 100644
--- a/meta/classes-recipe/cargo-update-recipe-crates.bbclass
+++ b/meta/classes-recipe/cargo-update-recipe-crates.bbclass
@@ -18,6 +18,8 @@ do_update_crates[depends] = "python3-native:do_populate_sysroot"
18do_update_crates[nostamp] = "1" 18do_update_crates[nostamp] = "1"
19do_update_crates[doc] = "Update the recipe by reading Cargo.lock and write in ${THISDIR}/${BPN}-crates.inc" 19do_update_crates[doc] = "Update the recipe by reading Cargo.lock and write in ${THISDIR}/${BPN}-crates.inc"
20 20
21RECIPE_UPGRADE_EXTRA_TASKS += "do_update_crates"
22
21# The directory where to search for Cargo.lock files 23# The directory where to search for Cargo.lock files
22CARGO_LOCK_SRC_DIR ??= "${S}" 24CARGO_LOCK_SRC_DIR ??= "${S}"
23 25
diff --git a/meta/classes-recipe/cargo.bbclass b/meta/classes-recipe/cargo.bbclass
index 0829a58dd9..2dd28e95d3 100644
--- a/meta/classes-recipe/cargo.bbclass
+++ b/meta/classes-recipe/cargo.bbclass
@@ -42,13 +42,13 @@ CARGO_BUILD_FLAGS = "-v --frozen --target ${RUST_HOST_SYS} ${BUILD_MODE} --manif
42# This is based on the content of CARGO_BUILD_FLAGS and generally will need to 42# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
43# change if CARGO_BUILD_FLAGS changes. 43# change if CARGO_BUILD_FLAGS changes.
44BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}" 44BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
45CARGO_TARGET_SUBDIR="${RUST_HOST_SYS}/${BUILD_DIR}" 45CARGO_TARGET_SUBDIR = "${RUST_HOST_SYS}/${BUILD_DIR}"
46oe_cargo_build () { 46oe_cargo_build () {
47 export RUSTFLAGS="${RUSTFLAGS}" 47 export RUSTFLAGS="${RUSTFLAGS}"
48 bbnote "Using rust targets from ${RUST_TARGET_PATH}" 48 bbnote "Using rust targets from ${RUST_TARGET_PATH}"
49 bbnote "cargo = $(which ${CARGO})" 49 bbnote "cargo = $(which ${CARGO})"
50 bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@" 50 bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} ${PACKAGECONFIG_CONFARGS} $@"
51 "${CARGO}" build ${CARGO_BUILD_FLAGS} "$@" 51 "${CARGO}" build ${CARGO_BUILD_FLAGS} ${PACKAGECONFIG_CONFARGS} "$@"
52} 52}
53 53
54do_compile[progress] = "outof:\s+(\d+)/(\d+)" 54do_compile[progress] = "outof:\s+(\d+)/(\d+)"
@@ -61,9 +61,11 @@ cargo_do_install () {
61 for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do 61 for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
62 case $tgt in 62 case $tgt in
63 *.so|*.rlib) 63 *.so|*.rlib)
64 install -d "${D}${rustlibdir}" 64 if [ -n "${CARGO_INSTALL_LIBRARIES}" ]; then
65 install -m755 "$tgt" "${D}${rustlibdir}" 65 install -d "${D}${rustlibdir}"
66 have_installed=true 66 install -m755 "$tgt" "${D}${rustlibdir}"
67 have_installed=true
68 fi
67 ;; 69 ;;
68 *examples) 70 *examples)
69 if [ -d "$tgt" ]; then 71 if [ -d "$tgt" ]; then
diff --git a/meta/classes-recipe/cargo_common.bbclass b/meta/classes-recipe/cargo_common.bbclass
index 0fb443edbd..c9eb2d09a5 100644
--- a/meta/classes-recipe/cargo_common.bbclass
+++ b/meta/classes-recipe/cargo_common.bbclass
@@ -18,7 +18,7 @@
18inherit rust-common 18inherit rust-common
19 19
20# Where we download our registry and dependencies to 20# Where we download our registry and dependencies to
21export CARGO_HOME = "${WORKDIR}/cargo_home" 21export CARGO_HOME = "${UNPACKDIR}/cargo_home"
22 22
23# The pkg-config-rs library used by cargo build scripts disables itself when 23# The pkg-config-rs library used by cargo build scripts disables itself when
24# cross compiling unless this is defined. We set up pkg-config appropriately 24# cross compiling unless this is defined. We set up pkg-config appropriately
@@ -41,20 +41,20 @@ CARGO_SRC_DIR ??= ""
41CARGO_MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml" 41CARGO_MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
42 42
43# Path to Cargo.lock 43# Path to Cargo.lock
44CARGO_LOCK_PATH ??= "${@ os.path.join(os.path.dirname(d.getVar('CARGO_MANIFEST_PATH', True)), 'Cargo.lock')}" 44CARGO_LOCK_PATH ??= "${@ os.path.join(os.path.dirname(d.getVar('CARGO_MANIFEST_PATH')), 'Cargo.lock')}"
45 45
46CARGO_RUST_TARGET_CCLD ??= "${RUST_TARGET_CCLD}" 46CARGO_RUST_TARGET_CCLD ??= "${RUST_TARGET_CCLD}"
47cargo_common_do_configure () { 47cargo_common_do_configure () {
48 mkdir -p ${CARGO_HOME}/bitbake 48 mkdir -p ${CARGO_HOME}/bitbake
49 49
50 cat <<- EOF > ${CARGO_HOME}/config 50 cat <<- EOF > ${CARGO_HOME}/config.toml
51 # EXTRA_OECARGO_PATHS 51 # EXTRA_OECARGO_PATHS
52 paths = [ 52 paths = [
53 $(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done) 53 $(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
54 ] 54 ]
55 EOF 55 EOF
56 56
57 cat <<- EOF >> ${CARGO_HOME}/config 57 cat <<- EOF >> ${CARGO_HOME}/config.toml
58 58
59 # Local mirror vendored by bitbake 59 # Local mirror vendored by bitbake
60 [source.bitbake] 60 [source.bitbake]
@@ -62,7 +62,7 @@ cargo_common_do_configure () {
62 EOF 62 EOF
63 63
64 if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then 64 if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
65 cat <<- EOF >> ${CARGO_HOME}/config 65 cat <<- EOF >> ${CARGO_HOME}/config.toml
66 66
67 [source.crates-io] 67 [source.crates-io]
68 replace-with = "bitbake" 68 replace-with = "bitbake"
@@ -70,7 +70,7 @@ cargo_common_do_configure () {
70 EOF 70 EOF
71 fi 71 fi
72 72
73 cat <<- EOF >> ${CARGO_HOME}/config 73 cat <<- EOF >> ${CARGO_HOME}/config.toml
74 74
75 [http] 75 [http]
76 # Multiplexing can't be enabled because http2 can't be enabled 76 # Multiplexing can't be enabled because http2 can't be enabled
@@ -82,7 +82,7 @@ cargo_common_do_configure () {
82 82
83 EOF 83 EOF
84 84
85 cat <<- EOF >> ${CARGO_HOME}/config 85 cat <<- EOF >> ${CARGO_HOME}/config.toml
86 86
87 # HOST_SYS 87 # HOST_SYS
88 [target.${RUST_HOST_SYS}] 88 [target.${RUST_HOST_SYS}]
@@ -90,7 +90,7 @@ cargo_common_do_configure () {
90 EOF 90 EOF
91 91
92 if [ "${RUST_HOST_SYS}" != "${RUST_BUILD_SYS}" ]; then 92 if [ "${RUST_HOST_SYS}" != "${RUST_BUILD_SYS}" ]; then
93 cat <<- EOF >> ${CARGO_HOME}/config 93 cat <<- EOF >> ${CARGO_HOME}/config.toml
94 94
95 # BUILD_SYS 95 # BUILD_SYS
96 [target.${RUST_BUILD_SYS}] 96 [target.${RUST_BUILD_SYS}]
@@ -99,7 +99,7 @@ cargo_common_do_configure () {
99 fi 99 fi
100 100
101 if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}" ]; then 101 if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}" ]; then
102 cat <<- EOF >> ${CARGO_HOME}/config 102 cat <<- EOF >> ${CARGO_HOME}/config.toml
103 103
104 # TARGET_SYS 104 # TARGET_SYS
105 [target.${RUST_TARGET_SYS}] 105 [target.${RUST_TARGET_SYS}]
@@ -110,7 +110,7 @@ cargo_common_do_configure () {
110 # Put build output in build directory preferred by bitbake instead of 110 # Put build output in build directory preferred by bitbake instead of
111 # inside source directory unless they are the same 111 # inside source directory unless they are the same
112 if [ "${B}" != "${S}" ]; then 112 if [ "${B}" != "${S}" ]; then
113 cat <<- EOF >> ${CARGO_HOME}/config 113 cat <<- EOF >> ${CARGO_HOME}/config.toml
114 114
115 [build] 115 [build]
116 # Use out of tree build destination to avoid polluting the source tree 116 # Use out of tree build destination to avoid polluting the source tree
@@ -118,7 +118,7 @@ cargo_common_do_configure () {
118 EOF 118 EOF
119 fi 119 fi
120 120
121 cat <<- EOF >> ${CARGO_HOME}/config 121 cat <<- EOF >> ${CARGO_HOME}/config.toml
122 122
123 [term] 123 [term]
124 progress.when = 'always' 124 progress.when = 'always'
@@ -129,7 +129,7 @@ cargo_common_do_configure () {
129python cargo_common_do_patch_paths() { 129python cargo_common_do_patch_paths() {
130 import shutil 130 import shutil
131 131
132 cargo_config = os.path.join(d.getVar("CARGO_HOME"), "config") 132 cargo_config = os.path.join(d.getVar("CARGO_HOME"), "config.toml")
133 if not os.path.exists(cargo_config): 133 if not os.path.exists(cargo_config):
134 return 134 return
135 135
@@ -138,11 +138,11 @@ python cargo_common_do_patch_paths() {
138 return 138 return
139 139
140 patches = dict() 140 patches = dict()
141 workdir = d.getVar('WORKDIR') 141 workdir = d.getVar('UNPACKDIR')
142 fetcher = bb.fetch2.Fetch(src_uri, d) 142 fetcher = bb.fetch2.Fetch(src_uri, d)
143 for url in fetcher.urls: 143 for url in fetcher.urls:
144 ud = fetcher.ud[url] 144 ud = fetcher.ud[url]
145 if ud.type == 'git': 145 if ud.type == 'git' or ud.type == 'gitsm':
146 name = ud.parm.get('name') 146 name = ud.parm.get('name')
147 destsuffix = ud.parm.get('destsuffix') 147 destsuffix = ud.parm.get('destsuffix')
148 if name is not None and destsuffix is not None: 148 if name is not None and destsuffix is not None:
@@ -171,7 +171,7 @@ python cargo_common_do_patch_paths() {
171 # here is better than letting cargo tell (in case the file is missing) 171 # here is better than letting cargo tell (in case the file is missing)
172 # "Cargo.lock should be modified but --frozen was given" 172 # "Cargo.lock should be modified but --frozen was given"
173 173
174 lockfile = d.getVar("CARGO_LOCK_PATH", True) 174 lockfile = d.getVar("CARGO_LOCK_PATH")
175 if not os.path.exists(lockfile): 175 if not os.path.exists(lockfile):
176 bb.fatal(f"{lockfile} file doesn't exist") 176 bb.fatal(f"{lockfile} file doesn't exist")
177 177
diff --git a/meta/classes-recipe/cmake-qemu.bbclass b/meta/classes-recipe/cmake-qemu.bbclass
index 46a89e2827..383fc74bf2 100644
--- a/meta/classes-recipe/cmake-qemu.bbclass
+++ b/meta/classes-recipe/cmake-qemu.bbclass
@@ -19,7 +19,7 @@ inherit qemu cmake
19DEPENDS:append:class-target = "${@' qemu-native' if bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', True, False, d) else ''}" 19DEPENDS:append:class-target = "${@' qemu-native' if bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', True, False, d) else ''}"
20 20
21cmake_do_generate_toolchain_file:append:class-target() { 21cmake_do_generate_toolchain_file:append:class-target() {
22 if [ "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}" ]; then 22 if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
23 # Write out a qemu wrapper that will be used as exe_wrapper so that cmake 23 # Write out a qemu wrapper that will be used as exe_wrapper so that cmake
24 # can run target helper binaries through that. This also allows to execute ctest. 24 # can run target helper binaries through that. This also allows to execute ctest.
25 qemu_binary="${@qemu_wrapper_cmdline(d, '${STAGING_DIR_HOST}', ['${STAGING_DIR_HOST}/${libdir}','${STAGING_DIR_HOST}/${base_libdir}'])}" 25 qemu_binary="${@qemu_wrapper_cmdline(d, '${STAGING_DIR_HOST}', ['${STAGING_DIR_HOST}/${libdir}','${STAGING_DIR_HOST}/${base_libdir}'])}"
diff --git a/meta/classes-recipe/cmake.bbclass b/meta/classes-recipe/cmake.bbclass
index e1c3d7ddb5..449fe2bb44 100644
--- a/meta/classes-recipe/cmake.bbclass
+++ b/meta/classes-recipe/cmake.bbclass
@@ -50,11 +50,34 @@ OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
50OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}" 50OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
51OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}" 51OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
52 52
53# Native C/C++ Compiler (without cpu arch/tune arguments)
54OECMAKE_NATIVE_C_COMPILER ?= "${@oecmake_map_compiler('BUILD_CC', d)[0]}"
55OECMAKE_NATIVE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('BUILD_CC', d)[1]}"
56OECMAKE_NATIVE_CXX_COMPILER ?= "${@oecmake_map_compiler('BUILD_CXX', d)[0]}"
57OECMAKE_NATIVE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('BUILD_CXX', d)[1]}"
58OECMAKE_NATIVE_AR ?= "${BUILD_AR}"
59OECMAKE_NATIVE_RANLIB ?= "${BUILD_RANLIB}"
60OECMAKE_NATIVE_NM ?= "${BUILD_NM}"
61
62# Native compiler flags
63OECMAKE_NATIVE_C_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CFLAGS}"
64OECMAKE_NATIVE_CXX_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CXXFLAGS}"
65OECMAKE_NATIVE_C_FLAGS_RELEASE ?= "-DNDEBUG"
66OECMAKE_NATIVE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
67OECMAKE_NATIVE_C_LINK_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CPPFLAGS} ${BUILD_LDFLAGS}"
68OECMAKE_NATIVE_CXX_LINK_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}"
69BUILD_CXXFLAGS += "${BUILD_CC_ARCH}"
70BUILD_CFLAGS += "${BUILD_CC_ARCH}"
71
53# clear compiler vars for allarch to avoid sig hash difference 72# clear compiler vars for allarch to avoid sig hash difference
54OECMAKE_C_COMPILER:allarch = "" 73OECMAKE_C_COMPILER:allarch = ""
55OECMAKE_C_COMPILER_LAUNCHER:allarch = "" 74OECMAKE_C_COMPILER_LAUNCHER:allarch = ""
56OECMAKE_CXX_COMPILER:allarch = "" 75OECMAKE_CXX_COMPILER:allarch = ""
57OECMAKE_CXX_COMPILER_LAUNCHER:allarch = "" 76OECMAKE_CXX_COMPILER_LAUNCHER:allarch = ""
77OECMAKE_NATIVE_C_COMPILER:allarch = ""
78OECMAKE_NATIVE_C_COMPILER_LAUNCHER:allarch = ""
79OECMAKE_NATIVE_CXX_COMPILER:allarch = ""
80OECMAKE_NATIVE_CXX_COMPILER_LAUNCHER:allarch = ""
58 81
59OECMAKE_RPATH ?= "" 82OECMAKE_RPATH ?= ""
60OECMAKE_PERLNATIVE_DIR ??= "" 83OECMAKE_PERLNATIVE_DIR ??= ""
@@ -67,6 +90,8 @@ EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
67export CMAKE_BUILD_PARALLEL_LEVEL 90export CMAKE_BUILD_PARALLEL_LEVEL
68CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}" 91CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}"
69CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}" 92CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}"
93CMAKE_BUILD_PARALLEL_LEVEL:task-compile-ptest-base = "${@oe.utils.parallel_make(d, False)}"
94CMAKE_BUILD_PARALLEL_LEVEL:task-install-ptest-base = "${@oe.utils.parallel_make(d, True)}"
70 95
71OECMAKE_TARGET_COMPILE ?= "all" 96OECMAKE_TARGET_COMPILE ?= "all"
72OECMAKE_TARGET_INSTALL ?= "install" 97OECMAKE_TARGET_INSTALL ?= "install"
@@ -154,6 +179,41 @@ list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
154list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir}) 179list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
155 180
156EOF 181EOF
182 cat > ${WORKDIR}/toolchain-native.cmake <<EOF
183set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('BUILD_OS'))} )
184set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('BUILD_ARCH'))} )
185set( CMAKE_C_COMPILER ${OECMAKE_NATIVE_C_COMPILER} )
186set( CMAKE_CXX_COMPILER ${OECMAKE_NATIVE_CXX_COMPILER} )
187set( CMAKE_ASM_COMPILER ${OECMAKE_NATIVE_C_COMPILER} )
188set( CMAKE_AR ${OECMAKE_NATIVE_AR} CACHE FILEPATH "Archiver" )
189set( CMAKE_RANLIB ${OECMAKE_NATIVE_RANLIB} CACHE FILEPATH "Archive Indexer" )
190set( CMAKE_NM ${OECMAKE_NATIVE_NM} CACHE FILEPATH "Symbol Lister" )
191set( CMAKE_C_FLAGS "${OECMAKE_NATIVE_C_FLAGS}" CACHE STRING "CFLAGS" )
192set( CMAKE_CXX_FLAGS "${OECMAKE_NATIVE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
193set( CMAKE_ASM_FLAGS "${OECMAKE_NATIVE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
194set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_NATIVE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
195set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_NATIVE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
196set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_NATIVE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
197set( CMAKE_C_LINK_FLAGS "${OECMAKE_NATIVE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
198set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_NATIVE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
199
200set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_NATIVE} )
201set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
202set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH )
203set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
204set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
205
206# Use native cmake modules
207list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR_NATIVE}/cmake/Modules/")
208
209# add for non /usr/lib libdir, e.g. /usr/lib64
210set( CMAKE_LIBRARY_PATH ${STAGING_BASE_LIBDIR_NATIVE} ${STAGING_LIBDIR_NATIVE})
211
212# add include dir to implicit includes in case it differs from /usr/include
213list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${STAGING_INCDIR_NATIVE})
214list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${STAGING_INCDIR_NATIVE})
215
216EOF
157} 217}
158 218
159addtask generate_toolchain_file after do_patch before do_configure 219addtask generate_toolchain_file after do_patch before do_configure
@@ -168,7 +228,7 @@ OECMAKE_ARGS = "\
168 -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \ 228 -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
169 -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \ 229 -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
170 -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \ 230 -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
171 -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d. getVar('prefix') + '/')} \ 231 -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \
172 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \ 232 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
173 -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \ 233 -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
174 -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \ 234 -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
diff --git a/meta/classes-recipe/cml1.bbclass b/meta/classes-recipe/cml1.bbclass
index 03e5fe6f47..3c2b4da4af 100644
--- a/meta/classes-recipe/cml1.bbclass
+++ b/meta/classes-recipe/cml1.bbclass
@@ -31,7 +31,7 @@ CROSS_CURSES_LIB = "-lncurses -ltinfo"
31CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"' 31CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
32TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo" 32TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
33 33
34KCONFIG_CONFIG_COMMAND ??= "menuconfig" 34KCONFIG_CONFIG_COMMAND ??= "menuconfig ${EXTRA_OEMAKE}"
35KCONFIG_CONFIG_ENABLE_MENUCONFIG ??= "true" 35KCONFIG_CONFIG_ENABLE_MENUCONFIG ??= "true"
36KCONFIG_CONFIG_ROOTDIR ??= "${B}" 36KCONFIG_CONFIG_ROOTDIR ??= "${B}"
37python do_menuconfig() { 37python do_menuconfig() {
@@ -58,7 +58,7 @@ python do_menuconfig() {
58 # ensure that environment variables are overwritten with this tasks 'd' values 58 # ensure that environment variables are overwritten with this tasks 'd' values
59 d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR") 59 d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
60 60
61 oe_terminal("sh -c 'make %s; if [ \\$? -ne 0 ]; then echo \"Command failed.\"; printf \"Press any key to continue... \"; read r; fi'" % d.getVar('KCONFIG_CONFIG_COMMAND'), 61 oe_terminal("sh -c 'make %s; if [ $? -ne 0 ]; then echo \"Command failed.\"; printf \"Press any key to continue... \"; read r; fi'" % d.getVar('KCONFIG_CONFIG_COMMAND'),
62 d.getVar('PN') + ' Configuration', d) 62 d.getVar('PN') + ' Configuration', d)
63 63
64 try: 64 try:
@@ -93,10 +93,9 @@ python do_diffconfig() {
93 93
94 if isdiff: 94 if isdiff:
95 statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment 95 statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
96 subprocess.call(statement, shell=True)
97 # No need to check the exit code as we know it's going to be 96 # No need to check the exit code as we know it's going to be
98 # non-zero, but that's what we expect. 97 # non-zero, but that's what we expect.
99 shutil.copy(configorig, config) 98 subprocess.call(statement, shell=True)
100 99
101 bb.plain("Config fragment has been dumped into:\n %s" % fragment) 100 bb.plain("Config fragment has been dumped into:\n %s" % fragment)
102 else: 101 else:
@@ -113,3 +112,10 @@ do_showconfig() {
113} 112}
114do_showconfig[nostamp] = "1" 113do_showconfig[nostamp] = "1"
115addtask showconfig after do_configure 114addtask showconfig after do_configure
115
116do_savedefconfig() {
117 bbplain "Saving defconfig to:\n${B}/defconfig"
118 oe_runmake -C ${B} savedefconfig
119}
120do_savedefconfig[nostamp] = "1"
121addtask savedefconfig after do_configure
diff --git a/meta/classes-recipe/core-image.bbclass b/meta/classes-recipe/core-image.bbclass
index 40fc15cb04..4072e420c5 100644
--- a/meta/classes-recipe/core-image.bbclass
+++ b/meta/classes-recipe/core-image.bbclass
@@ -26,11 +26,6 @@
26# - ssh-server-openssh - SSH server (openssh) 26# - ssh-server-openssh - SSH server (openssh)
27# - hwcodecs - Install hardware acceleration codecs 27# - hwcodecs - Install hardware acceleration codecs
28# - package-management - installs package management tools and preserves the package manager database 28# - package-management - installs package management tools and preserves the package manager database
29# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
30# - empty-root-password
31# - allow-empty-password
32# - allow-root-login
33# - post-install-logging
34# - serial-autologin-root - with 'empty-root-password': autologin 'root' on the serial console 29# - serial-autologin-root - with 'empty-root-password': autologin 'root' on the serial console
35# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs 30# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
36# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs 31# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
@@ -43,6 +38,12 @@
43# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime 38# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
44# - splash - bootup splash screen 39# - splash - bootup splash screen
45# 40#
41# Features for development purposes (previously part of debug-tweaks):
42# - empty-root-password - the root user has no password set
43# - allow-empty-password - users can have an empty password
44# - allow-root-login - the root user can login
45# - post-install-logging - log the output of postinstall scriptlets
46#
46FEATURE_PACKAGES_weston = "packagegroup-core-weston" 47FEATURE_PACKAGES_weston = "packagegroup-core-weston"
47FEATURE_PACKAGES_x11 = "packagegroup-core-x11" 48FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
48FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base" 49FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
diff --git a/meta/classes-recipe/create-spdx-image-3.0.bbclass b/meta/classes-recipe/create-spdx-image-3.0.bbclass
new file mode 100644
index 0000000000..e0f1766bb7
--- /dev/null
+++ b/meta/classes-recipe/create-spdx-image-3.0.bbclass
@@ -0,0 +1,85 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6# SPDX image tasks
7
8SPDX_ROOTFS_PACKAGES = "${SPDXDIR}/rootfs-packages.json"
9SPDXIMAGEDEPLOYDIR = "${SPDXDIR}/image-deploy"
10SPDXROOTFSDEPLOY = "${SPDXDIR}/rootfs-deploy"
11
12python spdx_collect_rootfs_packages() {
13 import json
14 from pathlib import Path
15 from oe.rootfs import image_list_installed_packages
16
17 root_packages_file = Path(d.getVar("SPDX_ROOTFS_PACKAGES"))
18
19 packages = image_list_installed_packages(d)
20 if not packages:
21 packages = {}
22
23 root_packages_file.parent.mkdir(parents=True, exist_ok=True)
24 with root_packages_file.open("w") as f:
25 json.dump(packages, f)
26}
27ROOTFS_POSTUNINSTALL_COMMAND =+ "spdx_collect_rootfs_packages"
28
29python do_create_rootfs_spdx() {
30 import oe.spdx30_tasks
31 oe.spdx30_tasks.create_rootfs_spdx(d)
32}
33addtask do_create_rootfs_spdx after do_rootfs before do_image
34SSTATETASKS += "do_create_rootfs_spdx"
35do_create_rootfs_spdx[sstate-inputdirs] = "${SPDXROOTFSDEPLOY}"
36do_create_rootfs_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
37do_create_rootfs_spdx[recrdeptask] += "do_create_spdx do_create_package_spdx"
38do_create_rootfs_spdx[cleandirs] += "${SPDXROOTFSDEPLOY}"
39do_create_rootfs_spdx[file-checksums] += "${SPDX3_LIB_DEP_FILES}"
40
41python do_create_rootfs_spdx_setscene() {
42 sstate_setscene(d)
43}
44addtask do_create_rootfs_spdx_setscene
45
46python do_create_image_spdx() {
47 import oe.spdx30_tasks
48 oe.spdx30_tasks.create_image_spdx(d)
49}
50addtask do_create_image_spdx after do_image_complete do_create_rootfs_spdx before do_build
51SSTATETASKS += "do_create_image_spdx"
52SSTATE_SKIP_CREATION:task-create-image-spdx = "1"
53do_create_image_spdx[sstate-inputdirs] = "${SPDXIMAGEWORK}"
54do_create_image_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
55do_create_image_spdx[cleandirs] = "${SPDXIMAGEWORK}"
56do_create_image_spdx[dirs] = "${SPDXIMAGEWORK}"
57do_create_image_spdx[file-checksums] += "${SPDX3_LIB_DEP_FILES}"
58do_create_image_spdx[vardeps] += "\
59 SPDX_IMAGE_PURPOSE \
60 "
61
62python do_create_image_spdx_setscene() {
63 sstate_setscene(d)
64}
65addtask do_create_image_spdx_setscene
66
67
68python do_create_image_sbom_spdx() {
69 import oe.spdx30_tasks
70 oe.spdx30_tasks.create_image_sbom_spdx(d)
71}
72addtask do_create_image_sbom_spdx after do_create_rootfs_spdx do_create_image_spdx before do_build
73SSTATETASKS += "do_create_image_sbom_spdx"
74SSTATE_SKIP_CREATION:task-create-image-sbom = "1"
75do_create_image_sbom_spdx[sstate-inputdirs] = "${SPDXIMAGEDEPLOYDIR}"
76do_create_image_sbom_spdx[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
77do_create_image_sbom_spdx[stamp-extra-info] = "${MACHINE_ARCH}"
78do_create_image_sbom_spdx[cleandirs] = "${SPDXIMAGEDEPLOYDIR}"
79do_create_image_sbom_spdx[recrdeptask] += "do_create_spdx do_create_package_spdx"
80do_create_image_sbom_spdx[file-checksums] += "${SPDX3_LIB_DEP_FILES}"
81
82python do_create_image_sbom_spdx_setscene() {
83 sstate_setscene(d)
84}
85addtask do_create_image_sbom_spdx_setscene
diff --git a/meta/classes-recipe/create-spdx-sdk-3.0.bbclass b/meta/classes-recipe/create-spdx-sdk-3.0.bbclass
new file mode 100644
index 0000000000..855fb3d09f
--- /dev/null
+++ b/meta/classes-recipe/create-spdx-sdk-3.0.bbclass
@@ -0,0 +1,74 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6# SPDX SDK tasks
7
8do_populate_sdk[recrdeptask] += "do_create_spdx do_create_package_spdx"
9do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
10do_populate_sdk[postfuncs] += "sdk_create_sbom"
11do_populate_sdk[file-checksums] += "${SPDX3_LIB_DEP_FILES}"
12POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_create_spdx"
13POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_create_spdx"
14
15do_populate_sdk_ext[recrdeptask] += "do_create_spdx do_create_package_spdx"
16do_populate_sdk_ext[cleandirs] += "${SPDXSDKEXTWORK}"
17do_populate_sdk_ext[postfuncs] += "sdk_ext_create_sbom"
18do_populate_sdk_ext[file-checksums] += "${SPDX3_LIB_DEP_FILES}"
19POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk-ext = " sdk_ext_host_create_spdx"
20POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk-ext = " sdk_ext_target_create_spdx"
21
22python sdk_host_create_spdx() {
23 from pathlib import Path
24 import oe.spdx30_tasks
25 spdx_work_dir = Path(d.getVar('SPDXSDKWORK'))
26
27 oe.spdx30_tasks.sdk_create_spdx(d, "host", spdx_work_dir, d.getVar("TOOLCHAIN_OUTPUTNAME"))
28}
29
30python sdk_target_create_spdx() {
31 from pathlib import Path
32 import oe.spdx30_tasks
33 spdx_work_dir = Path(d.getVar('SPDXSDKWORK'))
34
35 oe.spdx30_tasks.sdk_create_spdx(d, "target", spdx_work_dir, d.getVar("TOOLCHAIN_OUTPUTNAME"))
36}
37
38python sdk_ext_host_create_spdx() {
39 from pathlib import Path
40 import oe.spdx30_tasks
41 spdx_work_dir = Path(d.getVar('SPDXSDKEXTWORK'))
42
43 # TODO: This doesn't seem to work
44 oe.spdx30_tasks.sdk_create_spdx(d, "host", spdx_work_dir, d.getVar("TOOLCHAINEXT_OUTPUTNAME"))
45}
46
47python sdk_ext_target_create_spdx() {
48 from pathlib import Path
49 import oe.spdx30_tasks
50 spdx_work_dir = Path(d.getVar('SPDXSDKEXTWORK'))
51
52 # TODO: This doesn't seem to work
53 oe.spdx30_tasks.sdk_create_spdx(d, "target", spdx_work_dir, d.getVar("TOOLCHAINEXT_OUTPUTNAME"))
54}
55
56
57python sdk_create_sbom() {
58 from pathlib import Path
59 import oe.spdx30_tasks
60 sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
61 spdx_work_dir = Path(d.getVar('SPDXSDKWORK'))
62
63 oe.spdx30_tasks.create_sdk_sbom(d, sdk_deploydir, spdx_work_dir, d.getVar("TOOLCHAIN_OUTPUTNAME"))
64}
65
66python sdk_ext_create_sbom() {
67 from pathlib import Path
68 import oe.spdx30_tasks
69 sdk_deploydir = Path(d.getVar("SDKEXTDEPLOYDIR"))
70 spdx_work_dir = Path(d.getVar('SPDXSDKEXTWORK'))
71
72 oe.spdx30_tasks.create_sdk_sbom(d, sdk_deploydir, spdx_work_dir, d.getVar("TOOLCHAINEXT_OUTPUTNAME"))
73}
74
diff --git a/meta/classes-recipe/cross-canadian.bbclass b/meta/classes-recipe/cross-canadian.bbclass
index 1670217d69..059d9aa95f 100644
--- a/meta/classes-recipe/cross-canadian.bbclass
+++ b/meta/classes-recipe/cross-canadian.bbclass
@@ -36,7 +36,7 @@ python () {
36 if d.getVar("MODIFYTOS") != "1": 36 if d.getVar("MODIFYTOS") != "1":
37 return 37 return
38 38
39 if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]: 39 if d.getVar("TCLIBC") in [ 'baremetal', 'newlib', 'picolibc' ]:
40 return 40 return
41 41
42 tos = d.getVar("TARGET_OS") 42 tos = d.getVar("TARGET_OS")
diff --git a/meta/classes-recipe/cross.bbclass b/meta/classes-recipe/cross.bbclass
index 93de9a5274..9abf166e50 100644
--- a/meta/classes-recipe/cross.bbclass
+++ b/meta/classes-recipe/cross.bbclass
@@ -80,17 +80,17 @@ do_packagedata[stamp-extra-info] = ""
80 80
81USE_NLS = "no" 81USE_NLS = "no"
82 82
83export CC = "${BUILD_CC}" 83CC = "${BUILD_CC}"
84export CXX = "${BUILD_CXX}" 84CXX = "${BUILD_CXX}"
85export FC = "${BUILD_FC}" 85FC = "${BUILD_FC}"
86export CPP = "${BUILD_CPP}" 86CPP = "${BUILD_CPP}"
87export LD = "${BUILD_LD}" 87LD = "${BUILD_LD}"
88export CCLD = "${BUILD_CCLD}" 88CCLD = "${BUILD_CCLD}"
89export AR = "${BUILD_AR}" 89AR = "${BUILD_AR}"
90export AS = "${BUILD_AS}" 90AS = "${BUILD_AS}"
91export RANLIB = "${BUILD_RANLIB}" 91RANLIB = "${BUILD_RANLIB}"
92export STRIP = "${BUILD_STRIP}" 92STRIP = "${BUILD_STRIP}"
93export NM = "${BUILD_NM}" 93NM = "${BUILD_NM}"
94 94
95inherit nopackages 95inherit nopackages
96 96
diff --git a/meta/classes-recipe/crosssdk.bbclass b/meta/classes-recipe/crosssdk.bbclass
index 824b1bcff4..3541c2c393 100644
--- a/meta/classes-recipe/crosssdk.bbclass
+++ b/meta/classes-recipe/crosssdk.bbclass
@@ -4,6 +4,7 @@
4# SPDX-License-Identifier: MIT 4# SPDX-License-Identifier: MIT
5# 5#
6 6
7BB_DEFER_BBCLASSES:remove = "cross"
7inherit cross 8inherit cross
8 9
9CLASSOVERRIDE = "class-crosssdk" 10CLASSOVERRIDE = "class-crosssdk"
diff --git a/meta/classes-recipe/cython.bbclass b/meta/classes-recipe/cython.bbclass
new file mode 100644
index 0000000000..dd9fc732bc
--- /dev/null
+++ b/meta/classes-recipe/cython.bbclass
@@ -0,0 +1,8 @@
1DEPENDS:append = " python3-cython-native"
2
3do_compile[postfuncs] = "strip_cython_metadata"
4strip_cython_metadata() {
5 # Remove the Cython Metadata headers that we don't need after the build, and
6 # may contain build paths.
7 find ${S} -name "*.c" -print0 | xargs --no-run-if-empty --null sed -i -e "/BEGIN: Cython Metadata/,/END: Cython Metadata/d"
8}
diff --git a/meta/classes-recipe/devicetree.bbclass b/meta/classes-recipe/devicetree.bbclass
index bd50d7fa1d..ce9d008aac 100644
--- a/meta/classes-recipe/devicetree.bbclass
+++ b/meta/classes-recipe/devicetree.bbclass
@@ -40,7 +40,7 @@ PACKAGE_ARCH = "${MACHINE_ARCH}"
40SYSROOT_DIRS += "/boot/devicetree" 40SYSROOT_DIRS += "/boot/devicetree"
41FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo" 41FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
42 42
43S = "${WORKDIR}" 43S = "${UNPACKDIR}"
44B = "${WORKDIR}/build" 44B = "${WORKDIR}/build"
45 45
46# Default kernel includes, these represent what are normally used for in-kernel 46# Default kernel includes, these represent what are normally used for in-kernel
@@ -108,7 +108,11 @@ def devicetree_compile(dtspath, includes, d):
108 ppargs.append("-I{0}".format(i)) 108 ppargs.append("-I{0}".format(i))
109 ppargs += ["-o", "{0}.pp".format(dts), dtspath] 109 ppargs += ["-o", "{0}.pp".format(dts), dtspath]
110 bb.note("Running {0}".format(" ".join(ppargs))) 110 bb.note("Running {0}".format(" ".join(ppargs)))
111 subprocess.run(ppargs, check = True) 111 try:
112 subprocess.run(ppargs, check=True, capture_output=True)
113 except subprocess.CalledProcessError as e:
114 bb.fatal(f"Command '{' '.join(ppargs)}' failed with return code {e.returncode}\nstdout: {e.stdout.decode()}\nstderr: {e.stderr.decode()}\ndtspath: {os.path.abspath(dtspath)}")
115
112 116
113 # determine if the file is an overlay or not (using the preprocessed file) 117 # determine if the file is an overlay or not (using the preprocessed file)
114 isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts)) 118 isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))
@@ -124,7 +128,11 @@ def devicetree_compile(dtspath, includes, d):
124 dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")] 128 dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
125 dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)] 129 dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
126 bb.note("Running {0}".format(" ".join(dtcargs))) 130 bb.note("Running {0}".format(" ".join(dtcargs)))
127 subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 131 try:
132 subprocess.run(dtcargs, check=True, capture_output=True)
133 except subprocess.CalledProcessError as e:
134 bb.fatal(f"Command '{' '.join(dtcargs)}' failed with return code {e.returncode}\nstdout: {e.stdout.decode()}\nstderr: {e.stderr.decode()}\ndtname: {dtname}")
135
128 136
129python devicetree_do_compile() { 137python devicetree_do_compile() {
130 import re 138 import re
@@ -143,14 +151,16 @@ python devicetree_do_compile() {
143} 151}
144 152
145devicetree_do_install() { 153devicetree_do_install() {
146 for DTB_FILE in `ls *.dtb *.dtbo`; do 154 for dtb_file in *.dtb *.dtbo; do
147 install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE} 155 [ -e "$dtb_file" ] || continue
156 install -Dm 0644 "${B}/$dtb_file" "${D}/boot/devicetree/$dtb_file"
148 done 157 done
149} 158}
150 159
151devicetree_do_deploy() { 160devicetree_do_deploy() {
152 for DTB_FILE in `ls *.dtb *.dtbo`; do 161 for dtb_file in *.dtb *.dtbo; do
153 install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE} 162 [ -e "$dtb_file" ] || continue
163 install -Dm 0644 "${B}/$dtb_file" "${DEPLOYDIR}/devicetree/$dtb_file"
154 done 164 done
155} 165}
156addtask deploy before do_build after do_install 166addtask deploy before do_build after do_install
diff --git a/meta/classes-recipe/devupstream.bbclass b/meta/classes-recipe/devupstream.bbclass
index d941763fb7..60026a527f 100644
--- a/meta/classes-recipe/devupstream.bbclass
+++ b/meta/classes-recipe/devupstream.bbclass
@@ -13,9 +13,6 @@
13# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master" 13# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
14# SRCREV:class-devupstream = "abcdef" 14# SRCREV:class-devupstream = "abcdef"
15# 15#
16# If the first entry in SRC_URI is a git: URL then S is rewritten to
17# WORKDIR/git.
18#
19# There are a few caveats that remain to be solved: 16# There are a few caveats that remain to be solved:
20# - You can't build native or nativesdk recipes using for example 17# - You can't build native or nativesdk recipes using for example
21# devupstream:native, you can only build target recipes. 18# devupstream:native, you can only build target recipes.
@@ -39,9 +36,6 @@ python devupstream_virtclass_handler () {
39 src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI") 36 src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI")
40 uri = bb.fetch2.URI(src_uri.split()[0]) 37 uri = bb.fetch2.URI(src_uri.split()[0])
41 38
42 if uri.scheme == "git" and not d.getVar("S:class-devupstream"):
43 d.setVar("S", "${WORKDIR}/git")
44
45 # Modify the PV if the recipe hasn't already overridden it 39 # Modify the PV if the recipe hasn't already overridden it
46 pv = d.getVar("PV") 40 pv = d.getVar("PV")
47 proto_marker = "+" + uri.scheme 41 proto_marker = "+" + uri.scheme
diff --git a/meta/classes-recipe/features_check.bbclass b/meta/classes-recipe/features_check.bbclass
index 163a7bc3fc..4e122ecaef 100644
--- a/meta/classes-recipe/features_check.bbclass
+++ b/meta/classes-recipe/features_check.bbclass
@@ -16,7 +16,7 @@
16 16
17 17
18python () { 18python () {
19 if d.getVar('PARSE_ALL_RECIPES', False): 19 if bb.utils.to_boolean(d.getVar('PARSE_ALL_RECIPES', False)):
20 return 20 return
21 21
22 unused = True 22 unused = True
diff --git a/meta/classes-recipe/fontcache.bbclass b/meta/classes-recipe/fontcache.bbclass
index 6f4978369d..deadcd2fbb 100644
--- a/meta/classes-recipe/fontcache.bbclass
+++ b/meta/classes-recipe/fontcache.bbclass
@@ -9,8 +9,7 @@
9# packages. 9# packages.
10# 10#
11 11
12PACKAGE_WRITE_DEPS += "qemu-native" 12PACKAGE_WRITE_DEPS += "qemuwrapper-cross"
13inherit qemu
14 13
15FONT_PACKAGES ??= "${PN}" 14FONT_PACKAGES ??= "${PN}"
16FONT_PACKAGES:class-native = "" 15FONT_PACKAGES:class-native = ""
diff --git a/meta/classes-recipe/gio-module-cache.bbclass b/meta/classes-recipe/gio-module-cache.bbclass
index d12e03c4a0..3714678c7c 100644
--- a/meta/classes-recipe/gio-module-cache.bbclass
+++ b/meta/classes-recipe/gio-module-cache.bbclass
@@ -4,8 +4,7 @@
4# SPDX-License-Identifier: MIT 4# SPDX-License-Identifier: MIT
5# 5#
6 6
7PACKAGE_WRITE_DEPS += "qemu-native" 7PACKAGE_WRITE_DEPS += "qemuwrapper-cross"
8inherit qemu
9 8
10GIO_MODULE_PACKAGES ??= "${PN}" 9GIO_MODULE_PACKAGES ??= "${PN}"
11 10
diff --git a/meta/classes-recipe/go-mod-update-modules.bbclass b/meta/classes-recipe/go-mod-update-modules.bbclass
new file mode 100644
index 0000000000..5fccd0bb0d
--- /dev/null
+++ b/meta/classes-recipe/go-mod-update-modules.bbclass
@@ -0,0 +1,152 @@
1addtask do_update_modules after do_configure
2do_update_modules[nostamp] = "1"
3do_update_modules[network] = "1"
4
5# This class maintains two files, BPN-go-mods.inc and BPN-licenses.inc.
6#
7# -go-mods.inc will append SRC_URI with all of the Go modules that are
8# dependencies of this recipe.
9#
10# -licenses.inc will append LICENSE and LIC_FILES_CHKSUM with the found licenses
11# in the modules.
12#
13# These files are machine-generated and should not be modified.
14
15python do_update_modules() {
16 import subprocess, tempfile, json, re, urllib.parse
17 from oe.license import tidy_licenses
18 from oe.license_finder import find_licenses
19
20 def unescape_path(path):
21 """Unescape capital letters using exclamation points."""
22 return re.sub(r'!([a-z])', lambda m: m.group(1).upper(), path)
23
24 def fold_uri(uri):
25 """Fold URI for sorting shorter module paths before longer."""
26 return uri.replace(';', ' ').replace('/', '!')
27
28 def parse_existing_licenses():
29 hashes = {}
30 for url in d.getVar("LIC_FILES_CHKSUM").split():
31 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
32 if "spdx" in parm and parm["spdx"] != "Unknown":
33 hashes[parm["md5"]] = urllib.parse.unquote_plus(parm["spdx"])
34 return hashes
35
36 bpn = d.getVar("BPN")
37 thisdir = d.getVar("THISDIR")
38 s_dir = d.getVar("S")
39
40 with tempfile.TemporaryDirectory(prefix='go-mod-') as mod_cache_dir:
41 notice = """
42# This file has been generated by go-mod-update-modules.bbclass
43#
44# Do not modify it by hand, as the contents will be replaced when
45# running the update-modules task.
46
47"""
48
49 env = dict(os.environ, GOMODCACHE=mod_cache_dir)
50
51 source = d.expand("${UNPACKDIR}/${GO_SRCURI_DESTSUFFIX}")
52 output = subprocess.check_output(("go", "mod", "edit", "-json"), cwd=source, env=env, text=True)
53 go_mod = json.loads(output)
54
55 output = subprocess.check_output(("go", "list", "-json=Dir,Module", "-deps", f"{go_mod['Module']['Path']}/..."), cwd=source, env=env, text=True)
56
57 #
58 # Licenses
59 #
60
61 # load hashes from the existing licenses.inc
62 extra_hashes = parse_existing_licenses()
63
64 # The output of this isn't actually valid JSON, but a series of dicts.
65 # Wrap in [] and join the dicts with ,
66 # Very frustrating that the json parser in python can't repeatedly
67 # parse from a stream.
68 pkgs = json.loads('[' + output.replace('}\n{', '},\n{') + ']')
69 # Collect licenses for the dependencies.
70 licenses = set()
71 lic_files_chksum = []
72 lic_files = {}
73
74 for pkg in pkgs:
75 mod = pkg.get('Module', None)
76 if not mod or mod.get('Main', False):
77 continue
78
79 mod_dir = mod['Dir']
80
81 if not mod_dir.startswith(mod_cache_dir):
82 continue
83
84 path = os.path.relpath(mod_dir, mod_cache_dir)
85
86 for license_name, license_file, license_md5 in find_licenses(mod['Dir'], d, first_only=True, extra_hashes=extra_hashes):
87 lic_files[os.path.join(path, license_file)] = (license_name, license_md5)
88
89 for lic_file in lic_files:
90 license_name, license_md5 = lic_files[lic_file]
91 if license_name == "Unknown":
92 bb.warn(f"Unknown license: {lic_file} {license_md5}")
93
94 licenses.add(lic_files[lic_file][0])
95 lic_files_chksum.append(
96 f'file://pkg/mod/{lic_file};md5={license_md5};spdx={urllib.parse.quote_plus(license_name)}')
97
98 licenses_filename = os.path.join(thisdir, f"{bpn}-licenses.inc")
99 with open(licenses_filename, "w") as f:
100 f.write(notice)
101 f.write(f'LICENSE += "& {" & ".join(tidy_licenses(licenses))}"\n\n')
102 f.write('LIC_FILES_CHKSUM += "\\\n')
103 for lic in sorted(lic_files_chksum, key=fold_uri):
104 f.write(' ' + lic + ' \\\n')
105 f.write('"\n')
106
107 #
108 # Sources
109 #
110
111 # Collect the module cache files downloaded by the go list command as
112 # the go list command knows best what the go list command needs and it
113 # needs more files in the module cache than the go install command as
114 # it doesn't do the dependency pruning mentioned in the Go module
115 # reference, https://go.dev/ref/mod, for go 1.17 or higher.
116 src_uris = []
117 downloaddir = os.path.join(mod_cache_dir, 'cache', 'download')
118 for dirpath, _, filenames in os.walk(downloaddir):
119 # We want to process files under @v directories
120 path, base = os.path.split(os.path.relpath(dirpath, downloaddir))
121 if base != '@v':
122 continue
123
124 path = unescape_path(path)
125 zipver = None
126 for name in filenames:
127 ver, ext = os.path.splitext(name)
128 if ext == '.zip':
129 chksum = bb.utils.sha256_file(os.path.join(dirpath, name))
130 src_uris.append(f'gomod://{path};version={ver};sha256sum={chksum}')
131 zipver = ver
132 break
133 for name in filenames:
134 ver, ext = os.path.splitext(name)
135 if ext == '.mod' and ver != zipver:
136 chksum = bb.utils.sha256_file(os.path.join(dirpath, name))
137 src_uris.append(f'gomod://{path};version={ver};mod=1;sha256sum={chksum}')
138
139
140 go_mods_filename = os.path.join(thisdir, f"{bpn}-go-mods.inc")
141 with open(go_mods_filename, "w") as f:
142 f.write(notice)
143 f.write('SRC_URI += "\\\n')
144 for uri in sorted(src_uris, key=fold_uri):
145 f.write(' ' + uri + ' \\\n')
146 f.write('"\n')
147
148 subprocess.check_output(("go", "clean", "-modcache"), cwd=source, env=env, text=True)
149}
150
151# This doesn't work as we need to wipe the inc files first so we don't try looking for LICENSE files that don't yet exist
152# RECIPE_UPGRADE_EXTRA_TASKS += "do_update_modules"
diff --git a/meta/classes-recipe/go-mod.bbclass b/meta/classes-recipe/go-mod.bbclass
index ca3a690d05..a15dda8f0e 100644
--- a/meta/classes-recipe/go-mod.bbclass
+++ b/meta/classes-recipe/go-mod.bbclass
@@ -22,9 +22,13 @@ GOBUILDFLAGS:append = " -modcacherw"
22 22
23inherit go 23inherit go
24 24
25export GOMODCACHE = "${S}/pkg/mod"
26GO_MOD_CACHE_DIR = "${@os.path.relpath(d.getVar('GOMODCACHE'), d.getVar('UNPACKDIR'))}"
27do_unpack[cleandirs] += "${GOMODCACHE}"
28
25GO_WORKDIR ?= "${GO_IMPORT}" 29GO_WORKDIR ?= "${GO_IMPORT}"
26do_compile[dirs] += "${B}/src/${GO_WORKDIR}" 30do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
27 31
28export GOMODCACHE = "${B}/.mod" 32# Make go install unpack the module zip files in the module cache directory
29 33# before the license directory is polulated with license files.
30do_compile[cleandirs] += "${B}/.mod" 34addtask do_compile before do_populate_lic
diff --git a/meta/classes-recipe/go.bbclass b/meta/classes-recipe/go.bbclass
index d32509aa6d..e0f667373e 100644
--- a/meta/classes-recipe/go.bbclass
+++ b/meta/classes-recipe/go.bbclass
@@ -7,6 +7,9 @@
7inherit goarch 7inherit goarch
8inherit linuxloader 8inherit linuxloader
9 9
10# if the GO_IMPORT is not set in recipe generate an error
11GO_IMPORT ??= "${@bb.fatal("The recipe needs to set GO_IMPORT for go.bbclass to work")}"
12
10GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}" 13GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
11 14
12export GODEBUG = "gocachehash=1" 15export GODEBUG = "gocachehash=1"
@@ -15,13 +18,12 @@ GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go"
15GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go" 18GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
16GOROOT = "${STAGING_LIBDIR}/go" 19GOROOT = "${STAGING_LIBDIR}/go"
17export GOROOT 20export GOROOT
18export GOROOT_FINAL = "${libdir}/go"
19export GOCACHE = "${B}/.cache" 21export GOCACHE = "${B}/.cache"
20 22
21export GOARCH = "${TARGET_GOARCH}" 23export GOARCH = "${TARGET_GOARCH}"
22export GOOS = "${TARGET_GOOS}" 24export GOOS = "${TARGET_GOOS}"
23export GOHOSTARCH="${BUILD_GOARCH}" 25export GOHOSTARCH = "${BUILD_GOARCH}"
24export GOHOSTOS="${BUILD_GOOS}" 26export GOHOSTOS = "${BUILD_GOOS}"
25 27
26GOARM[export] = "0" 28GOARM[export] = "0"
27GOARM:arm:class-target = "${TARGET_GOARM}" 29GOARM:arm:class-target = "${TARGET_GOARM}"
@@ -80,19 +82,7 @@ export GOPROXY ??= "https://proxy.golang.org,direct"
80export GOTMPDIR ?= "${WORKDIR}/build-tmp" 82export GOTMPDIR ?= "${WORKDIR}/build-tmp"
81GOTMPDIR[vardepvalue] = "" 83GOTMPDIR[vardepvalue] = ""
82 84
83python go_do_unpack() { 85GO_SRCURI_DESTSUFFIX = "${@os.path.join(os.path.basename(d.getVar('S')), 'src', d.getVar('GO_IMPORT')) + '/'}"
84 src_uri = (d.getVar('SRC_URI') or "").split()
85 if len(src_uri) == 0:
86 return
87
88 fetcher = bb.fetch2.Fetch(src_uri, d)
89 for url in fetcher.urls:
90 if fetcher.ud[url].type == 'git':
91 if fetcher.ud[url].parm.get('destsuffix') is None:
92 s_dirname = os.path.basename(d.getVar('S'))
93 fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
94 fetcher.unpack(d.getVar('WORKDIR'))
95}
96 86
97go_list_packages() { 87go_list_packages() {
98 ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \ 88 ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
@@ -151,7 +141,7 @@ go_stage_testdata() {
151 cd "$oldwd" 141 cd "$oldwd"
152} 142}
153 143
154EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install 144EXPORT_FUNCTIONS do_configure do_compile do_install
155 145
156FILES:${PN}-dev = "${libdir}/go/src" 146FILES:${PN}-dev = "${libdir}/go/src"
157FILES:${PN}-staticdev = "${libdir}/go/pkg" 147FILES:${PN}-staticdev = "${libdir}/go/pkg"
diff --git a/meta/classes-recipe/grub-efi-cfg.bbclass b/meta/classes-recipe/grub-efi-cfg.bbclass
index 52e85a3bb0..9a5cb99c52 100644
--- a/meta/classes-recipe/grub-efi-cfg.bbclass
+++ b/meta/classes-recipe/grub-efi-cfg.bbclass
@@ -23,6 +23,7 @@ GRUB_TIMEOUT ?= "10"
23GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1" 23GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
24 24
25GRUB_ROOT ?= "${ROOT}" 25GRUB_ROOT ?= "${ROOT}"
26GRUB_TITLE ?= ""
26APPEND ?= "" 27APPEND ?= ""
27 28
28# Uses MACHINE specific KERNEL_IMAGETYPE 29# Uses MACHINE specific KERNEL_IMAGETYPE
@@ -91,10 +92,15 @@ python build_efi_cfg() {
91 if not overrides: 92 if not overrides:
92 bb.fatal('OVERRIDES not defined') 93 bb.fatal('OVERRIDES not defined')
93 94
95 localdata.need_overrides()
94 localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides) 96 localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)
95 97
96 for btype in btypes: 98 for btype in btypes:
97 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0])) 99 title = localdata.getVar('GRUB_TITLE')
100 if not title or len(title) == 0:
101 title = label
102
103 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (title, btype[0]))
98 lb = label 104 lb = label
99 if label == "install": 105 if label == "install":
100 lb = "install-efi" 106 lb = "install-efi"
diff --git a/meta/classes-recipe/gtk-icon-cache.bbclass b/meta/classes-recipe/gtk-icon-cache.bbclass
index 9ecb49916c..fad8c4c65f 100644
--- a/meta/classes-recipe/gtk-icon-cache.bbclass
+++ b/meta/classes-recipe/gtk-icon-cache.bbclass
@@ -16,7 +16,7 @@ GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' el
16inherit features_check 16inherit features_check
17ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}" 17ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
18 18
19DEPENDS +=" ${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \ 19DEPENDS += "${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \
20 ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \ 20 ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \
21 ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \ 21 ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \
22 ${GTKPN}-native \ 22 ${GTKPN}-native \
diff --git a/meta/classes-recipe/gtk-immodules-cache.bbclass b/meta/classes-recipe/gtk-immodules-cache.bbclass
index 8fbe1dd1fb..585838c105 100644
--- a/meta/classes-recipe/gtk-immodules-cache.bbclass
+++ b/meta/classes-recipe/gtk-immodules-cache.bbclass
@@ -8,9 +8,7 @@
8# 8#
9# Usage: Set GTKIMMODULES_PACKAGES to the packages that needs to update the inputmethod modules 9# Usage: Set GTKIMMODULES_PACKAGES to the packages that needs to update the inputmethod modules
10 10
11PACKAGE_WRITE_DEPS += "qemu-native" 11PACKAGE_WRITE_DEPS += "qemuwrapper-cross"
12
13inherit qemu
14 12
15GTKIMMODULES_PACKAGES ?= "${PN}" 13GTKIMMODULES_PACKAGES ?= "${PN}"
16 14
diff --git a/meta/classes-recipe/image-live.bbclass b/meta/classes-recipe/image-live.bbclass
index d2e95ef51c..c3054be630 100644
--- a/meta/classes-recipe/image-live.bbclass
+++ b/meta/classes-recipe/image-live.bbclass
@@ -147,7 +147,10 @@ build_iso() {
147 isohybrid_args="-u" 147 isohybrid_args="-u"
148 fi 148 fi
149 149
150 isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso 150 # EFI only does not need isohybrid
151 if [ "${PCBIOS}" = "1" ] || [ "${EFI}" != "1" ]; then
152 isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
153 fi
151} 154}
152 155
153build_fat_img() { 156build_fat_img() {
diff --git a/meta/classes-recipe/image.bbclass b/meta/classes-recipe/image.bbclass
index 28be6c6362..24a19fce1a 100644
--- a/meta/classes-recipe/image.bbclass
+++ b/meta/classes-recipe/image.bbclass
@@ -30,7 +30,7 @@ POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks"
30 30
31LICENSE ?= "MIT" 31LICENSE ?= "MIT"
32PACKAGES = "" 32PACKAGES = ""
33DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native" 33DEPENDS += "depmodwrapper-cross cross-localedef-native"
34RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}" 34RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
35RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}" 35RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
36PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:" 36PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
@@ -40,7 +40,7 @@ INHIBIT_DEFAULT_DEPS = "1"
40# IMAGE_FEATURES may contain any available package group 40# IMAGE_FEATURES may contain any available package group
41IMAGE_FEATURES ?= "" 41IMAGE_FEATURES ?= ""
42IMAGE_FEATURES[type] = "list" 42IMAGE_FEATURES[type] = "list"
43IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login serial-autologin-root post-install-logging overlayfs-etc" 43IMAGE_FEATURES[validitems] += "read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login serial-autologin-root post-install-logging overlayfs-etc"
44 44
45# Generate companion debugfs? 45# Generate companion debugfs?
46IMAGE_GEN_DEBUGFS ?= "0" 46IMAGE_GEN_DEBUGFS ?= "0"
@@ -88,6 +88,11 @@ PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
88 88
89IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete" 89IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"
90 90
91IMGMANIFESTDIR = "${WORKDIR}/image-task-manifest"
92
93IMAGE_OUTPUT_MANIFEST_DIR = "${WORKDIR}/deploy-image-output-manifest"
94IMAGE_OUTPUT_MANIFEST = "${IMAGE_OUTPUT_MANIFEST_DIR}/manifest.json"
95
91# Images are generally built explicitly, do not need to be part of world. 96# Images are generally built explicitly, do not need to be part of world.
92EXCLUDE_FROM_WORLD = "1" 97EXCLUDE_FROM_WORLD = "1"
93 98
@@ -194,8 +199,6 @@ IMAGE_LOCALES_ARCHIVE ?= '1'
194# aren't yet available. 199# aren't yet available.
195PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}" 200PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
196 201
197PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
198
199PACKAGE_EXCLUDE ??= "" 202PACKAGE_EXCLUDE ??= ""
200PACKAGE_EXCLUDE[type] = "list" 203PACKAGE_EXCLUDE[type] = "list"
201 204
@@ -277,14 +280,28 @@ fakeroot python do_image () {
277 execute_pre_post_process(d, pre_process_cmds) 280 execute_pre_post_process(d, pre_process_cmds)
278} 281}
279do_image[dirs] = "${TOPDIR}" 282do_image[dirs] = "${TOPDIR}"
283do_image[cleandirs] += "${IMGMANIFESTDIR}"
280addtask do_image after do_rootfs 284addtask do_image after do_rootfs
281 285
282fakeroot python do_image_complete () { 286fakeroot python do_image_complete () {
283 from oe.utils import execute_pre_post_process 287 from oe.utils import execute_pre_post_process
288 from pathlib import Path
289 import json
284 290
285 post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND") 291 post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")
286 292
287 execute_pre_post_process(d, post_process_cmds) 293 execute_pre_post_process(d, post_process_cmds)
294
295 image_manifest_dir = Path(d.getVar('IMGMANIFESTDIR'))
296
297 data = []
298
299 for manifest_path in image_manifest_dir.glob("*.json"):
300 with manifest_path.open("r") as f:
301 data.extend(json.load(f))
302
303 with open(d.getVar("IMAGE_OUTPUT_MANIFEST"), "w") as f:
304 json.dump(data, f)
288} 305}
289do_image_complete[dirs] = "${TOPDIR}" 306do_image_complete[dirs] = "${TOPDIR}"
290SSTATETASKS += "do_image_complete" 307SSTATETASKS += "do_image_complete"
@@ -292,6 +309,8 @@ SSTATE_SKIP_CREATION:task-image-complete = '1'
292do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}" 309do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
293do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" 310do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
294do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}" 311do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
312do_image_complete[sstate-plaindirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}"
313do_image_complete[dirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}"
295addtask do_image_complete after do_image before do_build 314addtask do_image_complete after do_image before do_build
296python do_image_complete_setscene () { 315python do_image_complete_setscene () {
297 sstate_setscene(d) 316 sstate_setscene(d)
@@ -303,28 +322,22 @@ addtask do_image_complete_setscene
303# IMAGE_QA_COMMANDS += " \ 322# IMAGE_QA_COMMANDS += " \
304# image_check_everything_ok \ 323# image_check_everything_ok \
305# " 324# "
325#
306# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs 326# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
307# construction has completed in order to validate the resulting image. 327# construction has completed in order to validate the resulting image.
308# 328#
309# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs 329# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
310# directory, which if QA passes will be the basis for the images. 330# directory, which if QA passes will be the basis for the images.
331#
332# The functions are expected to call oe.qa.handle_error() to report any
333# problems.
311fakeroot python do_image_qa () { 334fakeroot python do_image_qa () {
312 from oe.utils import ImageQAFailed
313
314 qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split() 335 qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
315 qamsg = ""
316 336
317 for cmd in qa_cmds: 337 for cmd in qa_cmds:
318 try: 338 bb.build.exec_func(cmd, d)
319 bb.build.exec_func(cmd, d) 339
320 except oe.utils.ImageQAFailed as e: 340 oe.qa.exit_if_errors(d)
321 qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
322 except Exception as e:
323 qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e)
324
325 if qamsg:
326 imgname = d.getVar('IMAGE_NAME')
327 bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
328} 341}
329addtask do_image_qa after do_rootfs before do_image 342addtask do_image_qa after do_rootfs before do_image
330 343
@@ -507,12 +520,14 @@ python () {
507 d.setVar(task, '\n'.join(cmds)) 520 d.setVar(task, '\n'.join(cmds))
508 d.setVarFlag(task, 'func', '1') 521 d.setVarFlag(task, 'func', '1')
509 d.setVarFlag(task, 'fakeroot', '1') 522 d.setVarFlag(task, 'fakeroot', '1')
523 d.setVarFlag(task, 'imagetype', t)
510 524
511 d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size') 525 d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
512 d.prependVarFlag(task, 'postfuncs', 'create_symlinks ') 526 d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
513 d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages)) 527 d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
514 d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps)) 528 d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
515 d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude)) 529 d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))
530 d.appendVarFlag(task, 'postfuncs', ' write_image_output_manifest')
516 531
517 bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after)) 532 bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
518 bb.build.addtask(task, 'do_image_complete', after, d) 533 bb.build.addtask(task, 'do_image_complete', after, d)
@@ -610,10 +625,47 @@ python create_symlinks() {
610 bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src)) 625 bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
611} 626}
612 627
628python write_image_output_manifest() {
629 import json
630 from pathlib import Path
631
632 taskname = d.getVar("BB_CURRENTTASK")
633 image_deploy_dir = Path(d.getVar('IMGDEPLOYDIR'))
634 image_manifest_dir = Path(d.getVar('IMGMANIFESTDIR'))
635 manifest_path = image_manifest_dir / ("do_" + d.getVar("BB_CURRENTTASK") + ".json")
636
637 image_name = d.getVar("IMAGE_NAME")
638 image_basename = d.getVar("IMAGE_BASENAME")
639 machine = d.getVar("MACHINE")
640
641 subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
642 imagetype = d.getVarFlag("do_" + taskname, 'imagetype', False)
643
644 data = {
645 "taskname": taskname,
646 "imagetype": imagetype,
647 "images": []
648 }
649
650 for type in subimages:
651 image_filename = image_name + "." + type
652 image_path = image_deploy_dir / image_filename
653 if not image_path.exists():
654 continue
655 data["images"].append({
656 "filename": image_filename,
657 })
658
659 with manifest_path.open("w") as f:
660 json.dump([data], f)
661}
662
613MULTILIBRE_ALLOW_REP += "${base_bindir} ${base_sbindir} ${bindir} ${sbindir} ${libexecdir} ${sysconfdir} ${nonarch_base_libdir}/udev /lib/modules/[^/]*/modules.*" 663MULTILIBRE_ALLOW_REP += "${base_bindir} ${base_sbindir} ${bindir} ${sbindir} ${libexecdir} ${sysconfdir} ${nonarch_base_libdir}/udev /lib/modules/[^/]*/modules.*"
614MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py" 664MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
615MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib" 665MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
616 666
667PSEUDO_INCLUDE_PATHS .= ",${MULTILIB_TEMP_ROOTFS}"
668
617do_fetch[noexec] = "1" 669do_fetch[noexec] = "1"
618do_unpack[noexec] = "1" 670do_unpack[noexec] = "1"
619do_patch[noexec] = "1" 671do_patch[noexec] = "1"
@@ -629,37 +681,11 @@ deltask do_package_write_ipk
629deltask do_package_write_deb 681deltask do_package_write_deb
630deltask do_package_write_rpm 682deltask do_package_write_rpm
631 683
632# Prepare the root links to point to the /usr counterparts.
633create_merged_usr_symlinks() {
634 root="$1"
635 install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
636 ln -rs $root${base_bindir} $root/bin
637 ln -rs $root${base_sbindir} $root/sbin
638 ln -rs $root${base_libdir} $root/${baselib}
639
640 if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
641 install -d $root${nonarch_base_libdir}
642 ln -rs $root${nonarch_base_libdir} $root/lib
643 fi
644
645 # create base links for multilibs
646 multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
647 for d in $multi_libdirs; do
648 install -d $root${exec_prefix}/$d
649 ln -rs $root${exec_prefix}/$d $root/$d
650 done
651}
652
653create_merged_usr_symlinks_rootfs() { 684create_merged_usr_symlinks_rootfs() {
654 create_merged_usr_symlinks ${IMAGE_ROOTFS} 685 create_merged_usr_symlinks ${IMAGE_ROOTFS}
655} 686}
656 687
657create_merged_usr_symlinks_sdk() {
658 create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
659}
660
661ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs', '',d)}" 688ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs', '',d)}"
662POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk', '',d)}"
663 689
664reproducible_final_image_task () { 690reproducible_final_image_task () {
665 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then 691 if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
@@ -673,12 +699,6 @@ reproducible_final_image_task () {
673 find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS 699 find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
674} 700}
675 701
676systemd_preset_all () { 702IMAGE_PREPROCESS_COMMAND:append = " reproducible_final_image_task "
677 if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
678 systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
679 fi
680}
681
682IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task "
683 703
684CVE_PRODUCT = "" 704CVE_PRODUCT = ""
diff --git a/meta/classes-recipe/image_types.bbclass b/meta/classes-recipe/image_types.bbclass
index b4a83ae284..e6ef0ce11e 100644
--- a/meta/classes-recipe/image_types.bbclass
+++ b/meta/classes-recipe/image_types.bbclass
@@ -113,7 +113,7 @@ IMAGE_CMD:btrfs () {
113 113
114oe_mksquashfs () { 114oe_mksquashfs () {
115 local comp=$1; shift 115 local comp=$1; shift
116 local extra_imagecmd=$@ 116 local extra_imagecmd="$@"
117 117
118 if [ "$comp" = "zstd" ]; then 118 if [ "$comp" = "zstd" ]; then
119 suffix="zst" 119 suffix="zst"
@@ -145,7 +145,8 @@ IMAGE_CMD:vfat = "oe_mkvfatfs ${EXTRA_IMAGECMD}"
145 145
146IMAGE_CMD_TAR ?= "tar" 146IMAGE_CMD_TAR ?= "tar"
147# ignore return code 1 "file changed as we read it" as other tasks(e.g. do_image_wic) may be hardlinking rootfs 147# ignore return code 1 "file changed as we read it" as other tasks(e.g. do_image_wic) may be hardlinking rootfs
148IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]" 148IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --pax-option=delete=atime,delete=ctime --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
149SPDX_IMAGE_PURPOSE:tar = "archive"
149 150
150do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append" 151do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
151IMAGE_CMD:cpio () { 152IMAGE_CMD:cpio () {
@@ -167,6 +168,7 @@ IMAGE_CMD:cpio () {
167 fi 168 fi
168 fi 169 fi
169} 170}
171SPDX_IMAGE_PURPOSE:cpio = "archive"
170 172
171UBI_VOLNAME ?= "${MACHINE}-rootfs" 173UBI_VOLNAME ?= "${MACHINE}-rootfs"
172UBI_VOLTYPE ?= "dynamic" 174UBI_VOLTYPE ?= "dynamic"
@@ -281,6 +283,7 @@ EXTRA_IMAGECMD:f2fs ?= ""
281# otherwise mkfs.vfat will automatically pick one. 283# otherwise mkfs.vfat will automatically pick one.
282EXTRA_IMAGECMD:vfat ?= "" 284EXTRA_IMAGECMD:vfat ?= ""
283 285
286do_image_tar[depends] += "tar-replacement-native:do_populate_sysroot"
284do_image_cpio[depends] += "cpio-native:do_populate_sysroot" 287do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
285do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot" 288do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
286do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot" 289do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
@@ -335,8 +338,8 @@ CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}.${type}"
335CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.gz" 338CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.gz"
336CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}.${type}" 339CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}.${type}"
337CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.xz" 340CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.xz"
338CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.lz4" 341CONVERSION_CMD:lz4 = "lz4 -f -9 -z -l ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.lz4"
339CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}.${type}" 342CONVERSION_CMD:lzo = "lzop -f -9 ${IMAGE_NAME}.${type}"
340CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}.${type}.zip ${IMAGE_NAME}.${type}" 343CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}.${type}.zip ${IMAGE_NAME}.${type}"
341CONVERSION_CMD:7zip = "7za a -mx=${7ZIP_COMPRESSION_LEVEL} -mm=${7ZIP_COMPRESSION_METHOD} ${IMAGE_NAME}.${type}.${7ZIP_EXTENSION} ${IMAGE_NAME}.${type}" 344CONVERSION_CMD:7zip = "7za a -mx=${7ZIP_COMPRESSION_LEVEL} -mm=${7ZIP_COMPRESSION_METHOD} ${IMAGE_NAME}.${type}.${7ZIP_EXTENSION} ${IMAGE_NAME}.${type}"
342CONVERSION_CMD:zst = "zstd -f -k -c ${ZSTD_DEFAULTS} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.zst" 345CONVERSION_CMD:zst = "zstd -f -k -c ${ZSTD_DEFAULTS} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.zst"
@@ -364,7 +367,7 @@ CONVERSION_DEPENDS_xz = "xz-native"
364CONVERSION_DEPENDS_lz4 = "lz4-native" 367CONVERSION_DEPENDS_lz4 = "lz4-native"
365CONVERSION_DEPENDS_lzo = "lzop-native" 368CONVERSION_DEPENDS_lzo = "lzop-native"
366CONVERSION_DEPENDS_zip = "zip-native" 369CONVERSION_DEPENDS_zip = "zip-native"
367CONVERSION_DEPENDS_7zip = "p7zip-native" 370CONVERSION_DEPENDS_7zip = "7zip-native"
368CONVERSION_DEPENDS_zst = "zstd-native" 371CONVERSION_DEPENDS_zst = "zstd-native"
369CONVERSION_DEPENDS_sum = "mtd-utils-native" 372CONVERSION_DEPENDS_sum = "mtd-utils-native"
370CONVERSION_DEPENDS_bmap = "bmaptool-native" 373CONVERSION_DEPENDS_bmap = "bmaptool-native"
@@ -389,3 +392,5 @@ IMAGE_TYPES_MASKED ?= ""
389 392
390# bmap requires python3 to be in the PATH 393# bmap requires python3 to be in the PATH
391EXTRANATIVEPATH += "${@'python3-native' if d.getVar('IMAGE_FSTYPES').find('.bmap') else ''}" 394EXTRANATIVEPATH += "${@'python3-native' if d.getVar('IMAGE_FSTYPES').find('.bmap') else ''}"
395# reproducible tar requires our tar, not the host's
396EXTRANATIVEPATH += "${@'tar-native' if 'tar' in d.getVar('IMAGE_FSTYPES') else ''}"
diff --git a/meta/classes-recipe/image_types_wic.bbclass b/meta/classes-recipe/image_types_wic.bbclass
index cf3be909b3..6180874a4c 100644
--- a/meta/classes-recipe/image_types_wic.bbclass
+++ b/meta/classes-recipe/image_types_wic.bbclass
@@ -15,6 +15,7 @@ WICVARS ?= "\
15 HOSTTOOLS_DIR \ 15 HOSTTOOLS_DIR \
16 IMAGE_BASENAME \ 16 IMAGE_BASENAME \
17 IMAGE_BOOT_FILES \ 17 IMAGE_BOOT_FILES \
18 IMAGE_CLASSES \
18 IMAGE_EFI_BOOT_FILES \ 19 IMAGE_EFI_BOOT_FILES \
19 IMAGE_LINK_NAME \ 20 IMAGE_LINK_NAME \
20 IMAGE_ROOTFS \ 21 IMAGE_ROOTFS \
@@ -26,9 +27,10 @@ WICVARS ?= "\
26 INITRD \ 27 INITRD \
27 INITRD_LIVE \ 28 INITRD_LIVE \
28 ISODIR \ 29 ISODIR \
30 KERNEL_CONSOLE \
29 KERNEL_IMAGETYPE \ 31 KERNEL_IMAGETYPE \
30 MACHINE \ 32 MACHINE \
31 PSEUDO_IGNORE_PATHS \ 33 PSEUDO_INCLUDE_PATHS \
32 RECIPE_SYSROOT_NATIVE \ 34 RECIPE_SYSROOT_NATIVE \
33 ROOTFS_SIZE \ 35 ROOTFS_SIZE \
34 STAGING_DATADIR \ 36 STAGING_DATADIR \
@@ -55,6 +57,16 @@ def wks_search(files, search_path):
55 if searched: 57 if searched:
56 return searched 58 return searched
57 59
60def wks_checksums(files, search_path):
61 ret = ""
62 for f in files:
63 found, hist = bb.utils.which(search_path, f, history=True)
64 ret = ret + " " + " ".join(h + ":False" for h in hist[:-1])
65 if found:
66 ret = ret + " " + found + ":True"
67 return ret
68
69
58WIC_CREATE_EXTRA_ARGS ?= "" 70WIC_CREATE_EXTRA_ARGS ?= ""
59 71
60IMAGE_CMD:wic () { 72IMAGE_CMD:wic () {
@@ -70,7 +82,7 @@ IMAGE_CMD:wic () {
70 if [ -z "$wks" ]; then 82 if [ -z "$wks" ]; then
71 bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately." 83 bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
72 fi 84 fi
73 BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS} 85 BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create --debug "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
74 86
75 # look to see if the user specifies a custom imager 87 # look to see if the user specifies a custom imager
76 IMAGER=direct 88 IMAGER=direct
@@ -91,13 +103,12 @@ IMAGE_CMD:wic () {
91 mv "$build_wic/$(basename "${wks%.wks}")"*.${IMAGER} "$out.wic" 103 mv "$build_wic/$(basename "${wks%.wks}")"*.${IMAGER} "$out.wic"
92} 104}
93IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR" 105IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
106SPDX_IMAGE_PURPOSE:wic = "diskImage"
94do_image_wic[cleandirs] = "${WORKDIR}/build-wic" 107do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
95 108
96PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
97
98# Rebuild when the wks file or vars in WICVARS change 109# Rebuild when the wks file or vars in WICVARS change
99USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}" 110USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
100WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}" 111WKS_FILE_CHECKSUM = "${@wks_checksums(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) if '${USING_WIC}' else ''}"
101do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}" 112do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
102do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}" 113do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
103 114
@@ -108,10 +119,12 @@ do_image_wic[deptask] += "do_image_complete"
108WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}' 119WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
109WKS_FILE_DEPENDS_DEFAULT += "bmaptool-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native" 120WKS_FILE_DEPENDS_DEFAULT += "bmaptool-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
110# Unified kernel images need objcopy 121# Unified kernel images need objcopy
111WKS_FILE_DEPENDS_DEFAULT += "virtual/${TARGET_PREFIX}binutils" 122WKS_FILE_DEPENDS_DEFAULT += "virtual/cross-binutils"
112WKS_FILE_DEPENDS_BOOTLOADERS = "" 123WKS_FILE_DEPENDS_BOOTLOADERS = ""
113WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release" 124WKS_FILE_DEPENDS_BOOTLOADERS:aarch64 = "grub-efi systemd-boot"
114WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release" 125WKS_FILE_DEPENDS_BOOTLOADERS:arm = "systemd-boot"
126WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot"
127WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot"
115WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi" 128WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi"
116 129
117WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}" 130WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
diff --git a/meta/classes-recipe/kernel-arch.bbclass b/meta/classes-recipe/kernel-arch.bbclass
index b32f6137a2..7aea9cd3e8 100644
--- a/meta/classes-recipe/kernel-arch.bbclass
+++ b/meta/classes-recipe/kernel-arch.bbclass
@@ -71,10 +71,14 @@ HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
71TARGET_OBJCOPY_KERNEL_ARCH ?= "" 71TARGET_OBJCOPY_KERNEL_ARCH ?= ""
72HOST_OBJCOPY_KERNEL_ARCH ?= "${TARGET_OBJCOPY_KERNEL_ARCH}" 72HOST_OBJCOPY_KERNEL_ARCH ?= "${TARGET_OBJCOPY_KERNEL_ARCH}"
73 73
74KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}" 74KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} \
75 -fuse-ld=bfd ${DEBUG_PREFIX_MAP} \
76 -ffile-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} \
77 -ffile-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH} \
78"
75KERNEL_LD = "${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}" 79KERNEL_LD = "${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
76KERNEL_AR = "${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}" 80KERNEL_AR = "${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
77KERNEL_OBJCOPY = "${HOST_PREFIX}objcopy ${HOST_OBJCOPY_KERNEL_ARCH}" 81KERNEL_OBJCOPY = "${HOST_PREFIX}objcopy ${HOST_OBJCOPY_KERNEL_ARCH}"
78# Code in package.py can't handle options on KERNEL_STRIP 82# Code in package.py can't handle options on KERNEL_STRIP
79KERNEL_STRIP = "${HOST_PREFIX}strip" 83KERNEL_STRIP = "${HOST_PREFIX}strip"
80TOOLCHAIN ?= "gcc" 84TOOLCHAIN = "gcc"
diff --git a/meta/classes-recipe/kernel-fit-extra-artifacts.bbclass b/meta/classes-recipe/kernel-fit-extra-artifacts.bbclass
new file mode 100644
index 0000000000..385fe9895a
--- /dev/null
+++ b/meta/classes-recipe/kernel-fit-extra-artifacts.bbclass
@@ -0,0 +1,19 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Generate and deploy additional artifacts required for FIT image creation.
8# To use this class, add it to the KERNEL_CLASSES variable.
9
10inherit kernel-uboot
11
12kernel_do_deploy:append() {
13 # Provide the kernel artifacts to post processing recipes e.g. for creating a FIT image
14 uboot_prep_kimage "$deployDir"
15 # For x86 a setup.bin needs to be include"d in a fitImage as well
16 if [ -e ${KERNEL_OUTPUT_DIR}/setup.bin ]; then
17 install -D "${B}/${KERNEL_OUTPUT_DIR}/setup.bin" "$deployDir/"
18 fi
19}
diff --git a/meta/classes-recipe/kernel-fit-image.bbclass b/meta/classes-recipe/kernel-fit-image.bbclass
new file mode 100644
index 0000000000..39845997ed
--- /dev/null
+++ b/meta/classes-recipe/kernel-fit-image.bbclass
@@ -0,0 +1,189 @@
1
2inherit kernel-arch kernel-artifact-names uboot-config deploy
3require conf/image-fitimage.conf
4
5S = "${UNPACKDIR}"
6
7PACKAGE_ARCH = "${MACHINE_ARCH}"
8
9# This bbclass requires KERNEL_CLASSES += "kernel-fit-extra-artifacts"
10EXCLUDE_FROM_WORLD = "1"
11
12DEPENDS += "\
13 u-boot-tools-native dtc-native \
14 ${@'kernel-signing-keys-native' if d.getVar('FIT_GENERATE_KEYS') == '1' else ''} \
15"
16
17python () {
18 image = d.getVar('INITRAMFS_IMAGE')
19 if image and d.getVar('INITRAMFS_IMAGE_BUNDLE') != '1':
20 if d.getVar('INITRAMFS_MULTICONFIG'):
21 mc = d.getVar('BB_CURRENT_MC')
22 d.appendVarFlag('do_compile', 'mcdepends', ' mc:' + mc + ':${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
23 else:
24 d.appendVarFlag('do_compile', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
25
26 #check if there are any dtb providers
27 providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
28 if providerdtb:
29 d.appendVarFlag('do_compile', 'depends', ' virtual/dtb:do_populate_sysroot')
30 d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
31}
32
33do_configure[noexec] = "1"
34
35UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel"
36KERNEL_IMAGEDEST ?= "/boot"
37
38python do_compile() {
39 import shutil
40 import oe.fitimage
41
42 itsfile = "fit-image.its"
43 fitname = "fitImage"
44 kernel_deploydir = d.getVar('DEPLOY_DIR_IMAGE')
45 kernel_deploysubdir = d.getVar('KERNEL_DEPLOYSUBDIR')
46 if kernel_deploysubdir:
47 kernel_deploydir = os.path.join(kernel_deploydir, kernel_deploysubdir)
48
49 # Collect all the its nodes before the its file is generated and mkimage gets executed
50 root_node = oe.fitimage.ItsNodeRootKernel(
51 d.getVar("FIT_DESC"), d.getVar("FIT_ADDRESS_CELLS"),
52 d.getVar('HOST_PREFIX'), d.getVar('UBOOT_ARCH'), d.getVar("FIT_CONF_PREFIX"),
53 oe.types.boolean(d.getVar('UBOOT_SIGN_ENABLE')), d.getVar("UBOOT_SIGN_KEYDIR"),
54 d.getVar("UBOOT_MKIMAGE"), d.getVar("UBOOT_MKIMAGE_DTCOPTS"),
55 d.getVar("UBOOT_MKIMAGE_SIGN"), d.getVar("UBOOT_MKIMAGE_SIGN_ARGS"),
56 d.getVar('FIT_HASH_ALG'), d.getVar('FIT_SIGN_ALG'), d.getVar('FIT_PAD_ALG'),
57 d.getVar('UBOOT_SIGN_KEYNAME'),
58 oe.types.boolean(d.getVar('FIT_SIGN_INDIVIDUAL')), d.getVar('UBOOT_SIGN_IMG_KEYNAME')
59 )
60
61 # Prepare a kernel image section.
62 shutil.copyfile(os.path.join(kernel_deploydir, "linux.bin"), "linux.bin")
63 with open(os.path.join(kernel_deploydir, "linux_comp")) as linux_comp_f:
64 linux_comp = linux_comp_f.read()
65 root_node.fitimage_emit_section_kernel("kernel-1", "linux.bin", linux_comp,
66 d.getVar('UBOOT_LOADADDRESS'), d.getVar('UBOOT_ENTRYPOINT'),
67 d.getVar('UBOOT_MKIMAGE_KERNEL_TYPE'), d.getVar("UBOOT_ENTRYSYMBOL"))
68
69 # Prepare a DTB image section
70 kernel_devicetree = d.getVar('KERNEL_DEVICETREE')
71 external_kernel_devicetree = d.getVar("EXTERNAL_KERNEL_DEVICETREE")
72 if kernel_devicetree:
73 for dtb in kernel_devicetree.split():
74 # In deploy_dir the DTBs are without sub-directories also with KERNEL_DTBVENDORED = "1"
75 dtb_name = os.path.basename(dtb)
76
77 # Skip DTB if it's also provided in EXTERNAL_KERNEL_DEVICETREE directory
78 if external_kernel_devicetree:
79 ext_dtb_path = os.path.join(external_kernel_devicetree, dtb_name)
80 if os.path.exists(ext_dtb_path) and os.path.getsize(ext_dtb_path) > 0:
81 continue
82
83 # Copy the dtb or dtbo file into the FIT image assembly directory
84 shutil.copyfile(os.path.join(kernel_deploydir, dtb_name), dtb_name)
85 root_node.fitimage_emit_section_dtb(dtb_name, dtb_name,
86 d.getVar("UBOOT_DTB_LOADADDRESS"), d.getVar("UBOOT_DTBO_LOADADDRESS"))
87
88 if external_kernel_devicetree:
89 # iterate over all .dtb and .dtbo files in the external kernel devicetree directory
90 # and copy them to the FIT image assembly directory
91 for dtb_name in sorted(os.listdir(external_kernel_devicetree)):
92 if dtb_name.endswith('.dtb') or dtb_name.endswith('.dtbo'):
93 dtb_path = os.path.join(external_kernel_devicetree, dtb_name)
94
95 # For symlinks, add a configuration node that refers to the DTB image node to which the symlink points
96 symlink_target = oe.fitimage.symlink_points_below(dtb_name, external_kernel_devicetree)
97 if symlink_target:
98 root_node.fitimage_emit_section_dtb_alias(dtb_name, symlink_target, True)
99 # For real DTB files add an image node and a configuration node
100 else:
101 shutil.copyfile(dtb_path, dtb_name)
102 root_node.fitimage_emit_section_dtb(dtb_name, dtb_name,
103 d.getVar("UBOOT_DTB_LOADADDRESS"), d.getVar("UBOOT_DTBO_LOADADDRESS"), True)
104
105 # Prepare a u-boot script section
106 fit_uboot_env = d.getVar("FIT_UBOOT_ENV")
107 if fit_uboot_env:
108 root_node.fitimage_emit_section_boot_script("bootscr-"+fit_uboot_env , fit_uboot_env)
109
110 # Prepare a setup section (For x86)
111 setup_bin_path = os.path.join(kernel_deploydir, "setup.bin")
112 if os.path.exists(setup_bin_path):
113 shutil.copyfile(setup_bin_path, "setup.bin")
114 root_node.fitimage_emit_section_setup("setup-1", "setup.bin")
115
116 # Prepare a ramdisk section.
117 initramfs_image = d.getVar('INITRAMFS_IMAGE')
118 if initramfs_image and d.getVar("INITRAMFS_IMAGE_BUNDLE") != '1':
119 # Find and use the first initramfs image archive type we find
120 found = False
121 for img in d.getVar("FIT_SUPPORTED_INITRAMFS_FSTYPES").split():
122 initramfs_path = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), "%s.%s" % (d.getVar('INITRAMFS_IMAGE_NAME'), img))
123 if os.path.exists(initramfs_path):
124 bb.note("Found initramfs image: " + initramfs_path)
125 found = True
126 root_node.fitimage_emit_section_ramdisk("ramdisk-1", initramfs_path,
127 initramfs_image,
128 d.getVar("UBOOT_RD_LOADADDRESS"),
129 d.getVar("UBOOT_RD_ENTRYPOINT"))
130 break
131 else:
132 bb.note("Did not find initramfs image: " + initramfs_path)
133
134 if not found:
135 bb.fatal("Could not find a valid initramfs type for %s, the supported types are: %s" % (d.getVar('INITRAMFS_IMAGE_NAME'), d.getVar('FIT_SUPPORTED_INITRAMFS_FSTYPES')))
136
137 # Generate the configuration section
138 root_node.fitimage_emit_section_config(d.getVar("FIT_CONF_DEFAULT_DTB"))
139
140 # Write the its file
141 root_node.write_its_file(itsfile)
142
143 # Assemble the FIT image
144 root_node.run_mkimage_assemble(itsfile, fitname)
145
146 # Sign the FIT image if required
147 root_node.run_mkimage_sign(fitname)
148}
149do_compile[depends] += "virtual/kernel:do_deploy"
150
151do_install() {
152 install -d "${D}/${KERNEL_IMAGEDEST}"
153 install -m 0644 "${B}/fitImage" "${D}/${KERNEL_IMAGEDEST}/fitImage"
154}
155
156FILES:${PN} = "${KERNEL_IMAGEDEST}"
157
158
159do_deploy() {
160 deploy_dir="${DEPLOYDIR}"
161 if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
162 deploy_dir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
163 fi
164 install -d "$deploy_dir"
165 install -m 0644 "${B}/fitImage" "$deploy_dir/fitImage"
166 install -m 0644 "${B}/fit-image.its" "$deploy_dir/fit-image.its"
167
168 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
169 ln -snf fit-image.its "$deploy_dir/fitImage-its-${KERNEL_FIT_NAME}.its"
170 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
171 ln -snf fit-image.its "$deploy_dir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
172 fi
173 fi
174
175 if [ -n "${INITRAMFS_IMAGE}" ]; then
176 ln -snf fit-image-its "$deploy_dir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
177 if [ -n "${KERNEL_FIT_LINK_NAME}" ]; then
178 ln -snf fit-image.its "$deploy_dir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
179 fi
180
181 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
182 ln -snf fitImage "$deploy_dir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
183 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
184 ln -snf fitImage "$deploy_dir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
185 fi
186 fi
187 fi
188}
189addtask deploy after do_compile before do_build
diff --git a/meta/classes-recipe/kernel-fitimage.bbclass b/meta/classes-recipe/kernel-fitimage.bbclass
deleted file mode 100644
index 4b74ddc201..0000000000
--- a/meta/classes-recipe/kernel-fitimage.bbclass
+++ /dev/null
@@ -1,881 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit kernel-uboot kernel-artifact-names uboot-config
8
9def get_fit_replacement_type(d):
10 kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
11 replacementtype = ""
12 if 'fitImage' in kerneltypes.split():
13 uarch = d.getVar("UBOOT_ARCH")
14 if uarch == "arm64":
15 replacementtype = "Image"
16 elif uarch == "riscv":
17 replacementtype = "Image"
18 elif uarch == "mips":
19 replacementtype = "vmlinuz.bin"
20 elif uarch == "x86":
21 replacementtype = "bzImage"
22 elif uarch == "microblaze":
23 replacementtype = "linux.bin"
24 else:
25 replacementtype = "zImage"
26 return replacementtype
27
28KERNEL_IMAGETYPE_REPLACEMENT ?= "${@get_fit_replacement_type(d)}"
29DEPENDS:append = " ${@'u-boot-tools-native dtc-native' if 'fitImage' in (d.getVar('KERNEL_IMAGETYPES') or '').split() else ''}"
30
31python __anonymous () {
32 # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
33 # to kernel.bbclass . We have to override it, since we pack zImage
34 # (at least for now) into the fitImage .
35 typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
36 if 'fitImage' in typeformake.split():
37 d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', d.getVar('KERNEL_IMAGETYPE_REPLACEMENT')))
38
39 image = d.getVar('INITRAMFS_IMAGE')
40 if image:
41 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
42
43 ubootenv = d.getVar('UBOOT_ENV')
44 if ubootenv:
45 d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/bootloader:do_populate_sysroot')
46
47 #check if there are any dtb providers
48 providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
49 if providerdtb:
50 d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
51 d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
52 d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
53}
54
55
56# Description string
57FIT_DESC ?= "Kernel fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
58
59# Kernel fitImage Hash Algo
60FIT_HASH_ALG ?= "sha256"
61
62# Kernel fitImage Signature Algo
63FIT_SIGN_ALG ?= "rsa2048"
64
65# Kernel / U-Boot fitImage Padding Algo
66FIT_PAD_ALG ?= "pkcs-1.5"
67
68# Generate keys for signing Kernel fitImage
69FIT_GENERATE_KEYS ?= "0"
70
71# Size of private keys in number of bits
72FIT_SIGN_NUMBITS ?= "2048"
73
74# args to openssl genrsa (Default is just the public exponent)
75FIT_KEY_GENRSA_ARGS ?= "-F4"
76
77# args to openssl req (Default is -batch for non interactive mode and
78# -new for new certificate)
79FIT_KEY_REQ_ARGS ?= "-batch -new"
80
81# Standard format for public key certificate
82FIT_KEY_SIGN_PKCS ?= "-x509"
83
84# Sign individual images as well
85FIT_SIGN_INDIVIDUAL ?= "0"
86
87FIT_CONF_PREFIX ?= "conf-"
88FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
89
90FIT_SUPPORTED_INITRAMFS_FSTYPES ?= "cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio"
91
92# Allow user to select the default DTB for FIT image when multiple dtb's exists.
93FIT_CONF_DEFAULT_DTB ?= ""
94
95# length of address in number of <u32> cells
96# ex: 1 32bits address, 2 64bits address
97FIT_ADDRESS_CELLS ?= "1"
98
99# Keys used to sign individually image nodes.
100# The keys to sign image nodes must be different from those used to sign
101# configuration nodes, otherwise the "required" property, from
102# UBOOT_DTB_BINARY, will be set to "conf", because "conf" prevails on "image".
103# Then the images signature checking will not be mandatory and no error will be
104# raised in case of failure.
105# UBOOT_SIGN_IMG_KEYNAME = "dev2" # keys name in keydir (eg. "dev2.crt", "dev2.key")
106
107#
108# Emit the fitImage ITS header
109#
110# $1 ... .its filename
111fitimage_emit_fit_header() {
112 cat << EOF >> $1
113/dts-v1/;
114
115/ {
116 description = "${FIT_DESC}";
117 #address-cells = <${FIT_ADDRESS_CELLS}>;
118EOF
119}
120
121#
122# Emit the fitImage section bits
123#
124# $1 ... .its filename
125# $2 ... Section bit type: imagestart - image section start
126# confstart - configuration section start
127# sectend - section end
128# fitend - fitimage end
129#
130fitimage_emit_section_maint() {
131 case $2 in
132 imagestart)
133 cat << EOF >> $1
134
135 images {
136EOF
137 ;;
138 confstart)
139 cat << EOF >> $1
140
141 configurations {
142EOF
143 ;;
144 sectend)
145 cat << EOF >> $1
146 };
147EOF
148 ;;
149 fitend)
150 cat << EOF >> $1
151};
152EOF
153 ;;
154 esac
155}
156
157#
158# Emit the fitImage ITS kernel section
159#
160# $1 ... .its filename
161# $2 ... Image counter
162# $3 ... Path to kernel image
163# $4 ... Compression type
164fitimage_emit_section_kernel() {
165
166 kernel_csum="${FIT_HASH_ALG}"
167 kernel_sign_algo="${FIT_SIGN_ALG}"
168 kernel_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
169
170 ENTRYPOINT="${UBOOT_ENTRYPOINT}"
171 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
172 ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
173 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
174 fi
175
176 cat << EOF >> $1
177 kernel-$2 {
178 description = "Linux kernel";
179 data = /incbin/("$3");
180 type = "${UBOOT_MKIMAGE_KERNEL_TYPE}";
181 arch = "${UBOOT_ARCH}";
182 os = "linux";
183 compression = "$4";
184 load = <${UBOOT_LOADADDRESS}>;
185 entry = <$ENTRYPOINT>;
186 hash-1 {
187 algo = "$kernel_csum";
188 };
189 };
190EOF
191
192 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$kernel_sign_keyname" ] ; then
193 sed -i '$ d' $1
194 cat << EOF >> $1
195 signature-1 {
196 algo = "$kernel_csum,$kernel_sign_algo";
197 key-name-hint = "$kernel_sign_keyname";
198 };
199 };
200EOF
201 fi
202}
203
204#
205# Emit the fitImage ITS DTB section
206#
207# $1 ... .its filename
208# $2 ... Image counter
209# $3 ... Path to DTB image
210fitimage_emit_section_dtb() {
211
212 dtb_csum="${FIT_HASH_ALG}"
213 dtb_sign_algo="${FIT_SIGN_ALG}"
214 dtb_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
215
216 dtb_loadline=""
217 dtb_ext=${DTB##*.}
218 if [ "${dtb_ext}" = "dtbo" ]; then
219 if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
220 dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
221 fi
222 elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
223 dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
224 fi
225 cat << EOF >> $1
226 fdt-$2 {
227 description = "Flattened Device Tree blob";
228 data = /incbin/("$3");
229 type = "flat_dt";
230 arch = "${UBOOT_ARCH}";
231 compression = "none";
232 $dtb_loadline
233 hash-1 {
234 algo = "$dtb_csum";
235 };
236 };
237EOF
238
239 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$dtb_sign_keyname" ] ; then
240 sed -i '$ d' $1
241 cat << EOF >> $1
242 signature-1 {
243 algo = "$dtb_csum,$dtb_sign_algo";
244 key-name-hint = "$dtb_sign_keyname";
245 };
246 };
247EOF
248 fi
249}
250
251#
252# Emit the fitImage ITS u-boot script section
253#
254# $1 ... .its filename
255# $2 ... Image counter
256# $3 ... Path to boot script image
257fitimage_emit_section_boot_script() {
258
259 bootscr_csum="${FIT_HASH_ALG}"
260 bootscr_sign_algo="${FIT_SIGN_ALG}"
261 bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
262
263 cat << EOF >> $1
264 bootscr-$2 {
265 description = "U-boot script";
266 data = /incbin/("$3");
267 type = "script";
268 arch = "${UBOOT_ARCH}";
269 compression = "none";
270 hash-1 {
271 algo = "$bootscr_csum";
272 };
273 };
274EOF
275
276 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
277 sed -i '$ d' $1
278 cat << EOF >> $1
279 signature-1 {
280 algo = "$bootscr_csum,$bootscr_sign_algo";
281 key-name-hint = "$bootscr_sign_keyname";
282 };
283 };
284EOF
285 fi
286}
287
288#
289# Emit the fitImage ITS setup section
290#
291# $1 ... .its filename
292# $2 ... Image counter
293# $3 ... Path to setup image
294fitimage_emit_section_setup() {
295
296 setup_csum="${FIT_HASH_ALG}"
297
298 cat << EOF >> $1
299 setup-$2 {
300 description = "Linux setup.bin";
301 data = /incbin/("$3");
302 type = "x86_setup";
303 arch = "${UBOOT_ARCH}";
304 os = "linux";
305 compression = "none";
306 load = <0x00090000>;
307 entry = <0x00090000>;
308 hash-1 {
309 algo = "$setup_csum";
310 };
311 };
312EOF
313}
314
315#
316# Emit the fitImage ITS ramdisk section
317#
318# $1 ... .its filename
319# $2 ... Image counter
320# $3 ... Path to ramdisk image
321fitimage_emit_section_ramdisk() {
322
323 ramdisk_csum="${FIT_HASH_ALG}"
324 ramdisk_sign_algo="${FIT_SIGN_ALG}"
325 ramdisk_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
326 ramdisk_loadline=""
327 ramdisk_entryline=""
328
329 if [ -n "${UBOOT_RD_LOADADDRESS}" ]; then
330 ramdisk_loadline="load = <${UBOOT_RD_LOADADDRESS}>;"
331 fi
332 if [ -n "${UBOOT_RD_ENTRYPOINT}" ]; then
333 ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
334 fi
335
336 cat << EOF >> $1
337 ramdisk-$2 {
338 description = "${INITRAMFS_IMAGE}";
339 data = /incbin/("$3");
340 type = "ramdisk";
341 arch = "${UBOOT_ARCH}";
342 os = "linux";
343 compression = "none";
344 $ramdisk_loadline
345 $ramdisk_entryline
346 hash-1 {
347 algo = "$ramdisk_csum";
348 };
349 };
350EOF
351
352 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$ramdisk_sign_keyname" ] ; then
353 sed -i '$ d' $1
354 cat << EOF >> $1
355 signature-1 {
356 algo = "$ramdisk_csum,$ramdisk_sign_algo";
357 key-name-hint = "$ramdisk_sign_keyname";
358 };
359 };
360EOF
361 fi
362}
363
364#
365# echoes symlink destination if it points below directory
366#
367# $1 ... file that's a potential symlink
368# $2 ... expected parent directory
369symlink_points_below() {
370 file="$2/$1"
371 dir=$2
372
373 if ! [ -L "$file" ]; then
374 return
375 fi
376
377 realpath="$(realpath --relative-to=$dir $file)"
378 if [ -z "${realpath%%../*}" ]; then
379 return
380 fi
381
382 echo "$realpath"
383}
384
385#
386# Emit the fitImage ITS configuration section
387#
388# $1 ... .its filename
389# $2 ... Linux kernel ID
390# $3 ... DTB image name
391# $4 ... ramdisk ID
392# $5 ... u-boot script ID
393# $6 ... config ID
394# $7 ... default flag
395# $8 ... default DTB image name
396fitimage_emit_section_config() {
397
398 conf_csum="${FIT_HASH_ALG}"
399 conf_sign_algo="${FIT_SIGN_ALG}"
400 conf_padding_algo="${FIT_PAD_ALG}"
401 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
402 conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
403 fi
404
405 its_file="$1"
406 kernel_id="$2"
407 dtb_image="$3"
408 ramdisk_id="$4"
409 bootscr_id="$5"
410 config_id="$6"
411 default_flag="$7"
412 default_dtb_image="$8"
413
414 # Test if we have any DTBs at all
415 sep=""
416 conf_desc=""
417 conf_node="${FIT_CONF_PREFIX}"
418 kernel_line=""
419 fdt_line=""
420 ramdisk_line=""
421 bootscr_line=""
422 setup_line=""
423 default_line=""
424 compatible_line=""
425
426 dtb_image_sect=$(symlink_points_below $dtb_image "${EXTERNAL_KERNEL_DEVICETREE}")
427 if [ -z "$dtb_image_sect" ]; then
428 dtb_image_sect=$dtb_image
429 fi
430
431 dtb_path="${EXTERNAL_KERNEL_DEVICETREE}/${dtb_image_sect}"
432 if [ -e "$dtb_path" ]; then
433 compat=$(fdtget -t s "$dtb_path" / compatible | sed 's/ /", "/g')
434 if [ -n "$compat" ]; then
435 compatible_line="compatible = \"$compat\";"
436 fi
437 fi
438
439 dtb_image=$(echo $dtb_image | tr '/' '_')
440 dtb_image_sect=$(echo "${dtb_image_sect}" | tr '/' '_')
441
442 # conf node name is selected based on dtb ID if it is present,
443 # otherwise its selected based on kernel ID
444 if [ -n "$dtb_image" ]; then
445 conf_node=$conf_node$dtb_image
446 else
447 conf_node=$conf_node$kernel_id
448 fi
449
450 if [ -n "$kernel_id" ]; then
451 conf_desc="Linux kernel"
452 sep=", "
453 kernel_line="kernel = \"kernel-$kernel_id\";"
454 fi
455
456 if [ -n "$dtb_image" ]; then
457 conf_desc="$conf_desc${sep}FDT blob"
458 sep=", "
459 fdt_line="fdt = \"fdt-$dtb_image_sect\";"
460 fi
461
462 if [ -n "$ramdisk_id" ]; then
463 conf_desc="$conf_desc${sep}ramdisk"
464 sep=", "
465 ramdisk_line="ramdisk = \"ramdisk-$ramdisk_id\";"
466 fi
467
468 if [ -n "$bootscr_id" ]; then
469 conf_desc="$conf_desc${sep}u-boot script"
470 sep=", "
471 bootscr_line="bootscr = \"bootscr-$bootscr_id\";"
472 fi
473
474 if [ -n "$config_id" ]; then
475 conf_desc="$conf_desc${sep}setup"
476 setup_line="setup = \"setup-$config_id\";"
477 fi
478
479 if [ "$default_flag" = "1" ]; then
480 # default node is selected based on dtb ID if it is present,
481 # otherwise its selected based on kernel ID
482 if [ -n "$dtb_image" ]; then
483 # Select default node as user specified dtb when
484 # multiple dtb exists.
485 if [ -n "$default_dtb_image" ]; then
486 default_line="default = \"${FIT_CONF_PREFIX}$default_dtb_image\";"
487 else
488 default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
489 fi
490 else
491 default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
492 fi
493 fi
494
495 cat << EOF >> $its_file
496 $default_line
497 $conf_node {
498 description = "$default_flag $conf_desc";
499 $compatible_line
500 $kernel_line
501 $fdt_line
502 $ramdisk_line
503 $bootscr_line
504 $setup_line
505 hash-1 {
506 algo = "$conf_csum";
507 };
508EOF
509
510 if [ -n "$conf_sign_keyname" ] ; then
511
512 sign_line="sign-images = "
513 sep=""
514
515 if [ -n "$kernel_id" ]; then
516 sign_line="$sign_line${sep}\"kernel\""
517 sep=", "
518 fi
519
520 if [ -n "$dtb_image" ]; then
521 sign_line="$sign_line${sep}\"fdt\""
522 sep=", "
523 fi
524
525 if [ -n "$ramdisk_id" ]; then
526 sign_line="$sign_line${sep}\"ramdisk\""
527 sep=", "
528 fi
529
530 if [ -n "$bootscr_id" ]; then
531 sign_line="$sign_line${sep}\"bootscr\""
532 sep=", "
533 fi
534
535 if [ -n "$config_id" ]; then
536 sign_line="$sign_line${sep}\"setup\""
537 fi
538
539 sign_line="$sign_line;"
540
541 cat << EOF >> $its_file
542 signature-1 {
543 algo = "$conf_csum,$conf_sign_algo";
544 key-name-hint = "$conf_sign_keyname";
545 padding = "$conf_padding_algo";
546 $sign_line
547 };
548EOF
549 fi
550
551 cat << EOF >> $its_file
552 };
553EOF
554}
555
556#
557# Assemble fitImage
558#
559# $1 ... .its filename
560# $2 ... fitImage name
561# $3 ... include ramdisk
562fitimage_assemble() {
563 kernelcount=1
564 dtbcount=""
565 DTBS=""
566 ramdiskcount=$3
567 setupcount=""
568 bootscr_id=""
569 default_dtb_image=""
570 rm -f $1 arch/${ARCH}/boot/$2
571
572 if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
573 bbfatal "Keys used to sign images and configuration nodes must be different."
574 fi
575
576 fitimage_emit_fit_header $1
577
578 #
579 # Step 1: Prepare a kernel image section.
580 #
581 fitimage_emit_section_maint $1 imagestart
582
583 uboot_prep_kimage
584 fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
585
586 #
587 # Step 2: Prepare a DTB image section
588 #
589
590 if [ -n "${KERNEL_DEVICETREE}" ]; then
591 dtbcount=1
592 for DTB in ${KERNEL_DEVICETREE}; do
593 if echo $DTB | grep -q '/dts/'; then
594 bbwarn "$DTB contains the full path to the the dts file, but only the dtb name should be used."
595 DTB=`basename $DTB | sed 's,\.dts$,.dtb,g'`
596 fi
597
598 # Skip ${DTB} if it's also provided in ${EXTERNAL_KERNEL_DEVICETREE}
599 if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -s ${EXTERNAL_KERNEL_DEVICETREE}/${DTB} ]; then
600 continue
601 fi
602
603 DTB_PATH="${KERNEL_OUTPUT_DIR}/dts/$DTB"
604 if [ ! -e "$DTB_PATH" ]; then
605 DTB_PATH="${KERNEL_OUTPUT_DIR}/$DTB"
606 fi
607
608 # Strip off the path component from the filename
609 if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
610 DTB=`basename $DTB`
611 fi
612
613 # Set the default dtb image if it exists in the devicetree.
614 if [ ${FIT_CONF_DEFAULT_DTB} = $DTB ];then
615 default_dtb_image=$(echo "$DTB" | tr '/' '_')
616 fi
617
618 DTB=$(echo "$DTB" | tr '/' '_')
619
620 # Skip DTB if we've picked it up previously
621 echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
622
623 DTBS="$DTBS $DTB"
624 DTB=$(echo $DTB | tr '/' '_')
625 fitimage_emit_section_dtb $1 $DTB $DTB_PATH
626 done
627 fi
628
629 if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
630 dtbcount=1
631 for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtb' -printf '%P\n' | sort) \
632 $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtbo' -printf '%P\n' | sort); do
633 # Set the default dtb image if it exists in the devicetree.
634 if [ ${FIT_CONF_DEFAULT_DTB} = $DTB ];then
635 default_dtb_image=$(echo "$DTB" | tr '/' '_')
636 fi
637
638 DTB=$(echo "$DTB" | tr '/' '_')
639
640 # Skip DTB/DTBO if we've picked it up previously
641 echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
642
643 DTBS="$DTBS $DTB"
644
645 # Also skip if a symlink. We'll later have each config section point at it
646 [ $(symlink_points_below $DTB "${EXTERNAL_KERNEL_DEVICETREE}") ] && continue
647
648 DTB=$(echo $DTB | tr '/' '_')
649 fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
650 done
651 fi
652
653 if [ -n "${FIT_CONF_DEFAULT_DTB}" ] && [ -z $default_dtb_image ]; then
654 bbwarn "${FIT_CONF_DEFAULT_DTB} is not available in the list of device trees."
655 fi
656
657 #
658 # Step 3: Prepare a u-boot script section
659 #
660
661 if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
662 if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
663 cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
664 bootscr_id="${UBOOT_ENV_BINARY}"
665 fitimage_emit_section_boot_script $1 "$bootscr_id" ${UBOOT_ENV_BINARY}
666 else
667 bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
668 fi
669 fi
670
671 #
672 # Step 4: Prepare a setup section. (For x86)
673 #
674 if [ -e ${KERNEL_OUTPUT_DIR}/setup.bin ]; then
675 setupcount=1
676 fitimage_emit_section_setup $1 $setupcount ${KERNEL_OUTPUT_DIR}/setup.bin
677 fi
678
679 #
680 # Step 5: Prepare a ramdisk section.
681 #
682 if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
683 # Find and use the first initramfs image archive type we find
684 found=
685 for img in ${FIT_SUPPORTED_INITRAMFS_FSTYPES}; do
686 initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img"
687 if [ -e "$initramfs_path" ]; then
688 bbnote "Found initramfs image: $initramfs_path"
689 found=true
690 fitimage_emit_section_ramdisk $1 "$ramdiskcount" "$initramfs_path"
691 break
692 else
693 bbnote "Did not find initramfs image: $initramfs_path"
694 fi
695 done
696
697 if [ -z "$found" ]; then
698 bbfatal "Could not find a valid initramfs type for ${INITRAMFS_IMAGE_NAME}, the supported types are: ${FIT_SUPPORTED_INITRAMFS_FSTYPES}"
699 fi
700 fi
701
702 fitimage_emit_section_maint $1 sectend
703
704 # Force the first Kernel and DTB in the default config
705 kernelcount=1
706 if [ -n "$dtbcount" ]; then
707 dtbcount=1
708 fi
709
710 #
711 # Step 6: Prepare a configurations section
712 #
713 fitimage_emit_section_maint $1 confstart
714
715 # kernel-fitimage.bbclass currently only supports a single kernel (no less or
716 # more) to be added to the FIT image along with 0 or more device trees and
717 # 0 or 1 ramdisk.
718 # It is also possible to include an initramfs bundle (kernel and rootfs in one binary)
719 # When the initramfs bundle is used ramdisk is disabled.
720 # If a device tree is to be part of the FIT image, then select
721 # the default configuration to be used is based on the dtbcount. If there is
722 # no dtb present than select the default configuation to be based on
723 # the kernelcount.
724 if [ -n "$DTBS" ]; then
725 i=1
726 for DTB in ${DTBS}; do
727 dtb_ext=${DTB##*.}
728 if [ "$dtb_ext" = "dtbo" ]; then
729 fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`" "$default_dtb_image"
730 else
731 fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`" "$default_dtb_image"
732 fi
733 i=`expr $i + 1`
734 done
735 else
736 defaultconfigcount=1
737 fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount "$default_dtb_image"
738 fi
739
740 fitimage_emit_section_maint $1 sectend
741
742 fitimage_emit_section_maint $1 fitend
743
744 #
745 # Step 7: Assemble the image
746 #
747 ${UBOOT_MKIMAGE} \
748 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
749 -f $1 \
750 ${KERNEL_OUTPUT_DIR}/$2
751
752 #
753 # Step 8: Sign the image
754 #
755 if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
756 ${UBOOT_MKIMAGE_SIGN} \
757 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
758 -F -k "${UBOOT_SIGN_KEYDIR}" \
759 -r ${KERNEL_OUTPUT_DIR}/$2 \
760 ${UBOOT_MKIMAGE_SIGN_ARGS}
761 fi
762}
763
764do_assemble_fitimage() {
765 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
766 cd ${B}
767 fitimage_assemble fit-image.its fitImage-none ""
768 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
769 ln -sf fitImage-none ${B}/${KERNEL_OUTPUT_DIR}/fitImage
770 fi
771 fi
772}
773
774addtask assemble_fitimage before do_install after do_compile
775
776SYSROOT_DIRS:append = " /sysroot-only"
777do_install:append() {
778 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
779 [ "${UBOOT_SIGN_ENABLE}" = "1" ]; then
780 install -D ${B}/${KERNEL_OUTPUT_DIR}/fitImage-none ${D}/sysroot-only/fitImage
781 fi
782}
783
784do_assemble_fitimage_initramfs() {
785 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
786 test -n "${INITRAMFS_IMAGE}" ; then
787 cd ${B}
788 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
789 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-bundle ""
790 ln -sf fitImage-bundle ${B}/${KERNEL_OUTPUT_DIR}/fitImage
791 else
792 fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
793 fi
794 fi
795}
796
797addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
798
799do_kernel_generate_rsa_keys() {
800 if [ "${UBOOT_SIGN_ENABLE}" = "0" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
801 bbwarn "FIT_GENERATE_KEYS is set to 1 even though UBOOT_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
802 fi
803
804 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
805
806 # Generate keys to sign configuration nodes, only if they don't already exist
807 if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
808 [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then
809
810 # make directory if it does not already exist
811 mkdir -p "${UBOOT_SIGN_KEYDIR}"
812
813 bbnote "Generating RSA private key for signing fitImage"
814 openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
815 "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
816 "${FIT_SIGN_NUMBITS}"
817
818 bbnote "Generating certificate for signing fitImage"
819 openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
820 -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
821 -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
822 fi
823
824 # Generate keys to sign image nodes, only if they don't already exist
825 if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key ] || \
826 [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt ]; then
827
828 # make directory if it does not already exist
829 mkdir -p "${UBOOT_SIGN_KEYDIR}"
830
831 bbnote "Generating RSA private key for signing fitImage"
832 openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
833 "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
834 "${FIT_SIGN_NUMBITS}"
835
836 bbnote "Generating certificate for signing fitImage"
837 openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
838 -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
839 -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt
840 fi
841 fi
842}
843
844addtask kernel_generate_rsa_keys before do_assemble_fitimage after do_compile
845
846kernel_do_deploy[vardepsexclude] = "DATETIME"
847kernel_do_deploy:append() {
848 # Update deploy directory
849 if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
850
851 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
852 bbnote "Copying fit-image.its source file..."
853 install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
854 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
855 ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
856 fi
857
858 bbnote "Copying linux.bin file..."
859 install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}
860 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
861 ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
862 fi
863 fi
864
865 if [ -n "${INITRAMFS_IMAGE}" ]; then
866 bbnote "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
867 install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
868 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
869 ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
870 fi
871
872 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
873 bbnote "Copying fitImage-${INITRAMFS_IMAGE} file..."
874 install -m 0644 ${B}/${KERNEL_OUTPUT_DIR}/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
875 if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
876 ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
877 fi
878 fi
879 fi
880 fi
881}
diff --git a/meta/classes-recipe/kernel-module-split.bbclass b/meta/classes-recipe/kernel-module-split.bbclass
index 9487365eb7..75ed696b72 100644
--- a/meta/classes-recipe/kernel-module-split.bbclass
+++ b/meta/classes-recipe/kernel-module-split.bbclass
@@ -86,11 +86,7 @@ python split_kernel_module_packages () {
86 vals[m.group(1)] = m.group(2) 86 vals[m.group(1)] = m.group(2)
87 return vals 87 return vals
88 88
89 def frob_metadata(file, pkg, pattern, format, basename): 89 def handle_conf_files(d, basename, pkg):
90 vals = extract_modinfo(file)
91
92 dvar = d.getVar('PKGD')
93
94 # If autoloading is requested, output ${modulesloaddir}/<name>.conf and append 90 # If autoloading is requested, output ${modulesloaddir}/<name>.conf and append
95 # appropriate modprobe commands to the postinst 91 # appropriate modprobe commands to the postinst
96 autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split() 92 autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
@@ -99,9 +95,12 @@ python split_kernel_module_packages () {
99 bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename) 95 bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
100 if autoload and basename not in autoloadlist: 96 if autoload and basename not in autoloadlist:
101 bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename)) 97 bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
98
99 # The .conf file can either be installed by a recipe or generated from module_autoload_*
100 conf = '%s/%s.conf' % (d.getVar('modulesloaddir'), basename)
101 name = '%s%s' % (d.getVar('PKGD'), conf)
102 # If module name is in KERNEL_MODULE_AUTOLOAD, then generate the .conf file and write to `name`.
102 if basename in autoloadlist: 103 if basename in autoloadlist:
103 conf = '%s/%s.conf' % (d.getVar('modulesloaddir'), basename)
104 name = '%s%s' % (dvar, conf)
105 os.makedirs(os.path.dirname(name), exist_ok=True) 104 os.makedirs(os.path.dirname(name), exist_ok=True)
106 with open(name, 'w') as f: 105 with open(name, 'w') as f:
107 if autoload: 106 if autoload:
@@ -109,30 +108,87 @@ python split_kernel_module_packages () {
109 f.write('%s\n' % m) 108 f.write('%s\n' % m)
110 else: 109 else:
111 f.write('%s\n' % basename) 110 f.write('%s\n' % basename)
 111 # If the .conf file exists, then add it to FILES:* and CONFFILES:* and add postinstall hook.
112 # It doesn't matter if it was generated from module_autoload_* or installed by the recipe.
113 if os.path.exists(name):
112 conf2append = ' %s' % conf 114 conf2append = ' %s' % conf
113 d.appendVar('FILES:%s' % pkg, conf2append) 115 d.appendVar('FILES:%s' % pkg, conf2append)
114 d.appendVar('CONFFILES:%s' % pkg, conf2append) 116 d.appendVar('CONFFILES:%s' % pkg, conf2append)
115 postinst = d.getVar('pkg_postinst:%s' % pkg) 117 postinst = d.getVar('pkg_postinst:%s' % pkg)
116 if not postinst: 118 if not postinst:
117 bb.fatal("pkg_postinst:%s not defined" % pkg) 119 postinst = d.getVar('pkg_postinst:modules')
118 postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename) 120 postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
119 d.setVar('pkg_postinst:%s' % pkg, postinst) 121 d.setVar('pkg_postinst:%s' % pkg, postinst)
120 122
121 # Write out any modconf fragment 123 # Write out any modconf fragment
122 modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split() 124 modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
123 modconf = d.getVar('module_conf_%s' % basename) 125 modconf = d.getVar('module_conf_%s' % basename)
126
127 # The .conf file can either be installed by a recipe or generated from module_conf_*
128 conf = '%s/%s.conf' % (d.getVar('modprobedir'), basename)
129 name = '%s%s' % (d.getVar('PKGD'), conf)
130 # If module name is in KERNEL_MODULE_PROBECONF, then generate the .conf file and write to `name`.
124 if modconf and basename in modconflist: 131 if modconf and basename in modconflist:
125 conf = '%s/%s.conf' % (d.getVar('modprobedir'), basename)
126 name = '%s%s' % (dvar, conf)
127 os.makedirs(os.path.dirname(name), exist_ok=True) 132 os.makedirs(os.path.dirname(name), exist_ok=True)
128 with open(name, 'w') as f: 133 with open(name, 'w') as f:
129 f.write("%s\n" % modconf) 134 f.write("%s\n" % modconf)
135 elif modconf:
136 bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
 137 # If the .conf file exists, then add it to FILES:* and CONFFILES:*.
138 # It doesn't matter if it was generated from module_conf_* or installed by the recipe.
139 if os.path.exists(name):
130 conf2append = ' %s' % conf 140 conf2append = ' %s' % conf
131 d.appendVar('FILES:%s' % pkg, conf2append) 141 d.appendVar('FILES:%s' % pkg, conf2append)
132 d.appendVar('CONFFILES:%s' % pkg, conf2append) 142 d.appendVar('CONFFILES:%s' % pkg, conf2append)
133 143
134 elif modconf: 144 def generate_conf_files(d, root, file_regex, output_pattern):
135 bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename)) 145 """
146 Arguments:
147 root -- the path in which to search. Contains system lib path
148 so needs expansion.
149 file_regex -- regular expression to match searched files. Use
150 parentheses () to mark the part of this expression
151 that should be used to derive the module name (to be
152 substituted where %s is used in other function
153 arguments as noted below)
154 output_pattern -- pattern to use for the package names. Must include %s.
155 """
156 import re, stat
157
158 dvar = d.getVar('PKGD')
159 root = d.expand(root)
160
161 # if the root directory doesn't exist, it's fatal - exit from the current execution.
162 if not os.path.exists(dvar + root):
163 bb.fatal("kernel module root directory path does not exist")
164
165 # walk through kernel module directory. for each entry in the directory, check if it
 166 # matches the desired regex pattern and file type. if it fulfills, process it to generate
 167 # its conf file based on its package name.
168 for walkroot, dirs, files in os.walk(dvar + root):
169 for file in files:
170 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
171 if not relpath:
172 continue
173 m = re.match(file_regex, os.path.basename(relpath))
174 if not m:
175 continue
176 file_f = os.path.join(dvar + root, relpath)
177 mode = os.lstat(file_f).st_mode
178 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
179 continue
180
181 basename = m.group(1)
182 on = legitimize_package_name(basename)
183 pkg = output_pattern % on
184 handle_conf_files(d, basename, pkg)
185
186
187 def frob_metadata(file, pkg, pattern, format, basename):
188 vals = extract_modinfo(file)
189 dvar = d.getVar('PKGD')
190
191 handle_conf_files(d, basename, pkg)
136 192
137 if "description" in vals: 193 if "description" in vals:
138 old_desc = d.getVar('DESCRIPTION:' + pkg) or "" 194 old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
@@ -167,19 +223,20 @@ python split_kernel_module_packages () {
167 postinst = d.getVar('pkg_postinst:modules') 223 postinst = d.getVar('pkg_postinst:modules')
168 postrm = d.getVar('pkg_postrm:modules') 224 postrm = d.getVar('pkg_postrm:modules')
169 225
170 if splitmods != '1':
171 d.appendVar('FILES:' + metapkg, '%s %s %s/modules' %
172 (d.getVar('modulesloaddir'), d.getVar('modprobedir'), d.getVar("nonarch_base_libdir")))
173 d.appendVar('pkg_postinst:%s' % metapkg, postinst)
174 d.prependVar('pkg_postrm:%s' % metapkg, postrm);
175 return
176
177 module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$' 226 module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$'
178 227
179 module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX') 228 module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
180 module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX') 229 module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
181 module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix 230 module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
182 231
232 if splitmods != '1':
233 d.appendVar('FILES:' + metapkg, '%s %s %s/modules' %
234 (d.getVar('modulesloaddir'), d.getVar('modprobedir'), d.getVar("nonarch_base_libdir")))
235 d.appendVar('pkg_postinst:%s' % metapkg, postinst)
236 d.prependVar('pkg_postrm:%s' % metapkg, postrm)
237 generate_conf_files(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern)
238 return
239
183 modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version)) 240 modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
184 if modules: 241 if modules:
185 d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules)) 242 d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
diff --git a/meta/classes-recipe/kernel-uboot.bbclass b/meta/classes-recipe/kernel-uboot.bbclass
index 30a85ccc28..62974baaf0 100644
--- a/meta/classes-recipe/kernel-uboot.bbclass
+++ b/meta/classes-recipe/kernel-uboot.bbclass
@@ -12,19 +12,27 @@ FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
12UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel" 12UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel"
13 13
14uboot_prep_kimage() { 14uboot_prep_kimage() {
15 if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then 15 output_dir=$1
16 # For backward compatibility with kernel-fitimage.bbclass and kernel-uboot.bbclass
17 # support calling without parameter as well
18 if [ -z "$output_dir" ]; then
19 output_dir='.'
20 fi
21
22 linux_bin=$output_dir/linux.bin
23 if [ -e "arch/${ARCH}/boot/compressed/vmlinux" ]; then
16 vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux" 24 vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
17 linux_suffix="" 25 linux_suffix=""
18 linux_comp="none" 26 linux_comp="none"
19 elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then 27 elif [ -e "arch/${ARCH}/boot/vmlinuz.bin" ]; then
20 rm -f linux.bin 28 rm -f "$linux_bin"
21 cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin 29 cp -l "arch/${ARCH}/boot/vmlinuz.bin" "$linux_bin"
22 vmlinux_path="" 30 vmlinux_path=""
23 linux_suffix="" 31 linux_suffix=""
24 linux_comp="none" 32 linux_comp="none"
25 else 33 else
26 vmlinux_path="vmlinux" 34 vmlinux_path="vmlinux"
27 # Use vmlinux.initramfs for linux.bin when INITRAMFS_IMAGE_BUNDLE set 35 # Use vmlinux.initramfs for $linux_bin when INITRAMFS_IMAGE_BUNDLE set
28 # As per the implementation in kernel.bbclass. 36 # As per the implementation in kernel.bbclass.
29 # See do_bundle_initramfs function 37 # See do_bundle_initramfs function
30 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ] && [ -e vmlinux.initramfs ]; then 38 if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ] && [ -e vmlinux.initramfs ]; then
@@ -34,16 +42,18 @@ uboot_prep_kimage() {
34 linux_comp="${FIT_KERNEL_COMP_ALG}" 42 linux_comp="${FIT_KERNEL_COMP_ALG}"
35 fi 43 fi
36 44
37 [ -n "${vmlinux_path}" ] && ${KERNEL_OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin 45 [ -n "$vmlinux_path" ] && ${KERNEL_OBJCOPY} -O binary -R .note -R .comment -S "$vmlinux_path" "$linux_bin"
38 46
39 if [ "${linux_comp}" != "none" ] ; then 47 if [ "$linux_comp" != "none" ] ; then
40 if [ "${linux_comp}" = "gzip" ] ; then 48 if [ "$linux_comp" = "gzip" ] ; then
41 gzip -9 linux.bin 49 gzip -9 "$linux_bin"
42 elif [ "${linux_comp}" = "lzo" ] ; then 50 elif [ "$linux_comp" = "lzo" ] ; then
43 lzop -9 linux.bin 51 lzop -9 "$linux_bin"
52 elif [ "$linux_comp" = "lzma" ] ; then
53 xz --format=lzma -f -6 "$linux_bin"
44 fi 54 fi
45 mv -f "linux.bin${linux_suffix}" linux.bin 55 mv -f "$linux_bin$linux_suffix" "$linux_bin"
46 fi 56 fi
47 57
48 echo "${linux_comp}" 58 printf "$linux_comp" > "$output_dir/linux_comp"
49} 59}
diff --git a/meta/classes-recipe/kernel-uimage.bbclass b/meta/classes-recipe/kernel-uimage.bbclass
index 1a599e656c..e353232a0e 100644
--- a/meta/classes-recipe/kernel-uimage.bbclass
+++ b/meta/classes-recipe/kernel-uimage.bbclass
@@ -29,6 +29,7 @@ python __anonymous () {
29do_uboot_mkimage[dirs] += "${B}" 29do_uboot_mkimage[dirs] += "${B}"
30do_uboot_mkimage() { 30do_uboot_mkimage() {
31 uboot_prep_kimage 31 uboot_prep_kimage
32 linux_comp="$(cat linux_comp)"
32 33
33 ENTRYPOINT=${UBOOT_ENTRYPOINT} 34 ENTRYPOINT=${UBOOT_ENTRYPOINT}
34 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then 35 if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
@@ -36,6 +37,6 @@ do_uboot_mkimage() {
36 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'` 37 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
37 fi 38 fi
38 39
39 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage 40 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "$linux_comp" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
40 rm -f linux.bin 41 rm -f linux.bin
41} 42}
diff --git a/meta/classes-recipe/kernel-yocto.bbclass b/meta/classes-recipe/kernel-yocto.bbclass
index 6468e8aa90..e53bf15194 100644
--- a/meta/classes-recipe/kernel-yocto.bbclass
+++ b/meta/classes-recipe/kernel-yocto.bbclass
@@ -25,6 +25,7 @@ KCONF_AUDIT_LEVEL ?= "1"
25KCONF_BSP_AUDIT_LEVEL ?= "0" 25KCONF_BSP_AUDIT_LEVEL ?= "0"
26KMETA_AUDIT ?= "yes" 26KMETA_AUDIT ?= "yes"
27KMETA_AUDIT_WERROR ?= "" 27KMETA_AUDIT_WERROR ?= ""
28KMETA_CONFIG_FEATURES ?= ""
28 29
29# returns local (absolute) path names for all valid patches in the 30# returns local (absolute) path names for all valid patches in the
30# src_uri 31# src_uri
@@ -62,8 +63,8 @@ def find_sccs(d):
62 63
63 return sources_list 64 return sources_list
64 65
65# check the SRC_URI for "kmeta" type'd git repositories. Return the name of 66# check the SRC_URI for "kmeta" type'd git repositories and directories. Return
66# the repository as it will be found in UNPACKDIR 67# the name of the repository or directory as it will be found in UNPACKDIR
67def find_kernel_feature_dirs(d): 68def find_kernel_feature_dirs(d):
68 feature_dirs=[] 69 feature_dirs=[]
69 fetch = bb.fetch2.Fetch([], d) 70 fetch = bb.fetch2.Fetch([], d)
@@ -71,13 +72,16 @@ def find_kernel_feature_dirs(d):
71 urldata = fetch.ud[url] 72 urldata = fetch.ud[url]
72 parm = urldata.parm 73 parm = urldata.parm
73 type="" 74 type=""
75 destdir = ""
74 if "type" in parm: 76 if "type" in parm:
75 type = parm["type"] 77 type = parm["type"]
76 if "destsuffix" in parm: 78 if "destsuffix" in parm:
77 destdir = parm["destsuffix"] 79 destdir = parm["destsuffix"]
78 if type == "kmeta": 80 elif urldata.type == "file":
79 feature_dirs.append(destdir) 81 destdir = urldata.basepath
80 82 if type == "kmeta" and destdir:
83 feature_dirs.append(destdir)
84
81 return feature_dirs 85 return feature_dirs
82 86
 83# find the master/machine source branch. In the same way that the fetcher processes 87
@@ -147,10 +151,6 @@ do_kernel_metadata() {
147 # from the source tree, into a common location and normalized "defconfig" name, 151 # from the source tree, into a common location and normalized "defconfig" name,
148 # where the rest of the process will include and incoroporate it into the build 152 # where the rest of the process will include and incoroporate it into the build
149 # 153 #
150 # If the fetcher has already placed a defconfig in UNPACKDIR (from the SRC_URI),
151 # we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
 152 # precedence.
153 #
154 if [ -n "${KBUILD_DEFCONFIG}" ]; then 154 if [ -n "${KBUILD_DEFCONFIG}" ]; then
155 if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then 155 if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
156 if [ -f "${UNPACKDIR}/defconfig" ]; then 156 if [ -f "${UNPACKDIR}/defconfig" ]; then
@@ -158,12 +158,10 @@ do_kernel_metadata() {
158 # one already placed in UNPACKDIR 158 # one already placed in UNPACKDIR
159 cmp "${UNPACKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" 159 cmp "${UNPACKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
160 if [ $? -ne 0 ]; then 160 if [ $? -ne 0 ]; then
161 bbdebug 1 "detected SRC_URI or unpatched defconfig in UNPACKDIR. ${KBUILD_DEFCONFIG} copied over it" 161 bbdebug 1 "detected SRC_URI or patched defconfig in UNPACKDIR. ${KBUILD_DEFCONFIG} copied over it"
162 fi 162 fi
163 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${UNPACKDIR}/defconfig
164 else
165 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${UNPACKDIR}/defconfig
166 fi 163 fi
164 cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${UNPACKDIR}/defconfig
167 in_tree_defconfig="${UNPACKDIR}/defconfig" 165 in_tree_defconfig="${UNPACKDIR}/defconfig"
168 else 166 else
169 bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)" 167 bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
@@ -234,8 +232,6 @@ do_kernel_metadata() {
234 for f in ${feat_dirs}; do 232 for f in ${feat_dirs}; do
235 if [ -d "${UNPACKDIR}/$f/kernel-meta" ]; then 233 if [ -d "${UNPACKDIR}/$f/kernel-meta" ]; then
236 includes="$includes -I${UNPACKDIR}/$f/kernel-meta" 234 includes="$includes -I${UNPACKDIR}/$f/kernel-meta"
237 elif [ -d "${UNPACKDIR}/../oe-local-files/$f" ]; then
238 includes="$includes -I${UNPACKDIR}/../oe-local-files/$f"
239 elif [ -d "${UNPACKDIR}/$f" ]; then 235 elif [ -d "${UNPACKDIR}/$f" ]; then
240 includes="$includes -I${UNPACKDIR}/$f" 236 includes="$includes -I${UNPACKDIR}/$f"
241 fi 237 fi
@@ -250,6 +246,9 @@ do_kernel_metadata() {
250 fi 246 fi
251 done 247 done
252 248
249 # allow in-tree config fragments to be used in KERNEL_FEATURES
250 includes="$includes -I${S}/arch/${ARCH}/configs -I${S}/kernel/configs"
251
253 # expand kernel features into their full path equivalents 252 # expand kernel features into their full path equivalents
254 bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE}) 253 bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
255 if [ -z "$bsp_definition" ]; then 254 if [ -z "$bsp_definition" ]; then
@@ -270,6 +269,9 @@ do_kernel_metadata() {
270 KERNEL_FEATURES_FINAL="" 269 KERNEL_FEATURES_FINAL=""
271 if [ -n "${KERNEL_FEATURES}" ]; then 270 if [ -n "${KERNEL_FEATURES}" ]; then
272 for feature in ${KERNEL_FEATURES}; do 271 for feature in ${KERNEL_FEATURES}; do
272 feature_as_specified="$feature"
273 feature="$(echo $feature_as_specified | cut -d: -f1)"
274 feature_specifier="$(echo $feature_as_specified | cut -d: -f2)"
273 feature_found=f 275 feature_found=f
274 for d in $includes; do 276 for d in $includes; do
275 path_to_check=$(echo $d | sed 's/^-I//') 277 path_to_check=$(echo $d | sed 's/^-I//')
@@ -287,7 +289,7 @@ do_kernel_metadata() {
287 bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue" 289 bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
288 fi 290 fi
289 else 291 else
290 KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature" 292 KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature_as_specified"
291 fi 293 fi
292 done 294 done
293 fi 295 fi
@@ -297,7 +299,11 @@ do_kernel_metadata() {
297 elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`" 299 elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
298 if [ -n "${elements}" ]; then 300 if [ -n "${elements}" ]; then
299 echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition 301 echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
300 scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL 302 echo "${KMETA_CONFIG_FEATURES}" | grep -q "prefer-modules"
303 if [ $? -eq 0 ]; then
304 scc_defines="-DMODULE_OR_Y=m"
305 fi
306 scc --force $scc_defines -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
301 if [ $? -ne 0 ]; then 307 if [ $? -ne 0 ]; then
302 bbfatal_log "Could not generate configuration queue for ${KMACHINE}." 308 bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
303 fi 309 fi
@@ -345,6 +351,9 @@ do_patch() {
345 cd ${S} 351 cd ${S}
346 352
347 check_git_config 353 check_git_config
354 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
355 reproducible_git_committer_author
356 fi
348 meta_dir=$(kgit --meta) 357 meta_dir=$(kgit --meta)
349 (cd ${meta_dir}; ln -sf patch.queue series) 358 (cd ${meta_dir}; ln -sf patch.queue series)
350 if [ -f "${meta_dir}/series" ]; then 359 if [ -f "${meta_dir}/series" ]; then
@@ -379,19 +388,19 @@ do_kernel_checkout() {
379 set +e 388 set +e
380 389
381 source_dir=`echo ${S} | sed 's%/$%%'` 390 source_dir=`echo ${S} | sed 's%/$%%'`
382 source_workdir="${WORKDIR}/git" 391 source_unpackdir="${UNPACKDIR}/${BB_GIT_DEFAULT_DESTSUFFIX}"
383 if [ -d "${WORKDIR}/git/" ]; then 392 if [ -d "${source_unpackdir}" ]; then
384 # case: git repository 393 # case: git repository
385 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree. 394 # if S is UNPACKDIR/BB_GIT_DEFAULT_DESTSUFFIX, then we shouldn't be moving or deleting the tree.
386 if [ "${source_dir}" != "${source_workdir}" ]; then 395 if [ "${source_dir}" != "${source_unpackdir}" ]; then
387 if [ -d "${source_workdir}/.git" ]; then 396 if [ -d "${source_unpackdir}/.git" ]; then
388 # regular git repository with .git 397 # regular git repository with .git
389 rm -rf ${S} 398 rm -rf ${S}
390 mv ${WORKDIR}/git ${S} 399 mv ${source_unpackdir} ${S}
391 else 400 else
392 # create source for bare cloned git repository 401 # create source for bare cloned git repository
393 git clone ${WORKDIR}/git ${S} 402 git clone ${source_unpackdir} ${S}
394 rm -rf ${WORKDIR}/git 403 rm -rf ${source_unpackdir}
395 fi 404 fi
396 fi 405 fi
397 cd ${S} 406 cd ${S}
@@ -427,6 +436,9 @@ do_kernel_checkout() {
427 rm -f .gitignore 436 rm -f .gitignore
428 git init 437 git init
429 check_git_config 438 check_git_config
439 if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
440 reproducible_git_committer_author
441 fi
430 git add . 442 git add .
431 git commit -q -n -m "baseline commit: creating repo for ${PN}-${PV}" 443 git commit -q -n -m "baseline commit: creating repo for ${PN}-${PV}"
432 git clean -d -f 444 git clean -d -f
@@ -434,7 +446,7 @@ do_kernel_checkout() {
434 446
435 set -e 447 set -e
436} 448}
437do_kernel_checkout[dirs] = "${S} ${WORKDIR}" 449do_kernel_checkout[dirs] = "${S} ${UNPACKDIR}"
438 450
439addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc 451addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
440addtask kernel_metadata after do_validate_branches do_unpack before do_patch 452addtask kernel_metadata after do_validate_branches do_unpack before do_patch
@@ -442,8 +454,13 @@ do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
442do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}" 454do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
443do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot" 455do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
444 456
445do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot" 457# ${S} doesn't exist for us at unpack
446do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot" 458do_qa_unpack() {
459 return
460}
461
462do_kernel_configme[depends] += "virtual/cross-binutils:do_populate_sysroot"
463do_kernel_configme[depends] += "virtual/cross-cc:do_populate_sysroot"
447do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot" 464do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
448do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot" 465do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
449do_kernel_configme[dirs] += "${S} ${B}" 466do_kernel_configme[dirs] += "${S} ${B}"
@@ -559,6 +576,11 @@ python do_config_analysis() {
559python do_kernel_configcheck() { 576python do_kernel_configcheck() {
560 import re, string, sys, subprocess 577 import re, string, sys, subprocess
561 578
579 audit_flag = d.getVar( "KMETA_AUDIT" )
580 if not audit_flag:
581 bb.note( "kernel config audit disabled, skipping .." )
582 return
583
562 s = d.getVar('S') 584 s = d.getVar('S')
563 585
564 # if KMETA isn't set globally by a recipe using this routine, use kgit to 586 # if KMETA isn't set globally by a recipe using this routine, use kgit to
diff --git a/meta/classes-recipe/kernel.bbclass b/meta/classes-recipe/kernel.bbclass
index d6eedf942c..2d9943c8a0 100644
--- a/meta/classes-recipe/kernel.bbclass
+++ b/meta/classes-recipe/kernel.bbclass
@@ -12,7 +12,7 @@ KERNEL_PACKAGE_NAME ??= "kernel"
12KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }" 12KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
13 13
14PROVIDES += "virtual/kernel" 14PROVIDES += "virtual/kernel"
15DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native" 15DEPENDS += "virtual/cross-binutils virtual/cross-cc kmod-native bc-native bison-native"
16DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}" 16DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
17DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}" 17DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
18DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}" 18DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}"
@@ -21,7 +21,10 @@ PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
21do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot" 21do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
22do_clean[depends] += "make-mod-scripts:do_clean" 22do_clean[depends] += "make-mod-scripts:do_clean"
23 23
24CVE_PRODUCT ?= "linux_kernel" 24# CPE entries from NVD use linux_kernel, but the raw CVE entries from the kernel CNA have
25# vendor: linux and product: linux. Note that multiple distributions use "linux" as a product
26# name, so we need to fill vendor to avoid false positives
27CVE_PRODUCT ?= "linux_kernel linux:linux"
25 28
26S = "${STAGING_KERNEL_DIR}" 29S = "${STAGING_KERNEL_DIR}"
27B = "${WORKDIR}/build" 30B = "${WORKDIR}/build"
@@ -81,6 +84,10 @@ python __anonymous () {
81 types = (alttype + ' ' + types).strip() 84 types = (alttype + ' ' + types).strip()
82 d.setVar('KERNEL_IMAGETYPES', types) 85 d.setVar('KERNEL_IMAGETYPES', types)
83 86
87 # Since kernel-fitimage.bbclass got replaced by kernel-fit-image.bbclass
88 if "fitImage" in types:
89 bb.error("fitImage is no longer supported as a KERNEL_IMAGETYPE(S). FIT images are built by the linux-yocto-fitimage recipe.")
90
84 # KERNEL_IMAGETYPES may contain a mixture of image types supported directly 91 # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
85 # by the kernel build system and types which are created by post-processing 92 # by the kernel build system and types which are created by post-processing
86 # the output of the kernel build system (e.g. compressing vmlinux -> 93 # the output of the kernel build system (e.g. compressing vmlinux ->
@@ -115,7 +122,9 @@ python __anonymous () {
115 122
116 d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower)) 123 d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
117 d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1') 124 d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
118 d.prependVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e 125
126 if d.getVar('KERNEL_IMAGETYPE_SYMLINK') == '1':
127 d.prependVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
119if [ -n "$D" ]; then 128if [ -n "$D" ]; then
120 ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 129 ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
121else 130else
@@ -127,7 +136,7 @@ else
127fi 136fi
128set -e 137set -e
129""" % (type, type, type, type, type, type, type)) 138""" % (type, type, type, type, type, type, type))
130 d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e 139 d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e
131if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then 140if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
132 rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 141 rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
133fi 142fi
@@ -141,7 +150,7 @@ set -e
141 # standalone for use by wic and other tools. 150 # standalone for use by wic and other tools.
142 if image: 151 if image:
143 if d.getVar('INITRAMFS_MULTICONFIG'): 152 if d.getVar('INITRAMFS_MULTICONFIG'):
144 d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc::${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete') 153 d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc:${BB_CURRENT_MC}:${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
145 else: 154 else:
146 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete') 155 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
147 if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')): 156 if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
@@ -222,15 +231,13 @@ KERNEL_DTBVENDORED ?= "0"
222# 231#
223# configuration 232# configuration
224# 233#
225export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"
226
227KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}" 234KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
228 235
229# kernels are generally machine specific 236# kernels are generally machine specific
230PACKAGE_ARCH = "${MACHINE_ARCH}" 237PACKAGE_ARCH = "${MACHINE_ARCH}"
231 238
232# U-Boot support 239# U-Boot support
233UBOOT_ENTRYPOINT ?= "20008000" 240UBOOT_ENTRYPOINT ?= "0x20008000"
234UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}" 241UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
235 242
236# Some Linux kernel configurations need additional parameters on the command line 243# Some Linux kernel configurations need additional parameters on the command line
@@ -474,17 +481,10 @@ kernel_do_install() {
474 install -d ${D}/${KERNEL_IMAGEDEST} 481 install -d ${D}/${KERNEL_IMAGEDEST}
475 482
476 # 483 #
477 # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task 484 # bundle_initramfs runs after do_install before do_deploy. do_deploy does what's needed therefore.
478 # by do_assemble_fitimage_initramfs.
479 # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
480 # So, at the level of the install task we should not try to install the fitImage. fitImage is still not
481 # generated yet.
482 # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
483 # the deploy folder.
484 # 485 #
485
486 for imageType in ${KERNEL_IMAGETYPES} ; do 486 for imageType in ${KERNEL_IMAGETYPES} ; do
487 if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then 487 if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
488 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION} 488 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
489 fi 489 fi
490 done 490 done
@@ -658,7 +658,7 @@ KERNEL_LOCALVERSION ??= ""
658# Note: This class saves the value of localversion to a file 658# Note: This class saves the value of localversion to a file
659# so other recipes like make-mod-scripts can restore it via the 659# so other recipes like make-mod-scripts can restore it via the
660# helper function get_kernellocalversion_file 660# helper function get_kernellocalversion_file
661export LOCALVERSION="${KERNEL_LOCALVERSION}" 661export LOCALVERSION = "${KERNEL_LOCALVERSION}"
662 662
663kernel_do_configure() { 663kernel_do_configure() {
664 # fixes extra + in /lib/modules/2.6.37+ 664 # fixes extra + in /lib/modules/2.6.37+
@@ -686,18 +686,8 @@ kernel_do_configure() {
686 ${KERNEL_CONFIG_COMMAND} 686 ${KERNEL_CONFIG_COMMAND}
687} 687}
688 688
689do_savedefconfig() {
690 bbplain "Saving defconfig to:\n${B}/defconfig"
691 oe_runmake -C ${B} savedefconfig
692}
693do_savedefconfig[nostamp] = "1"
694addtask savedefconfig after do_configure
695
696inherit cml1 pkgconfig 689inherit cml1 pkgconfig
697 690
698# Need LD, HOSTLDFLAGS and more for config operations
699KCONFIG_CONFIG_COMMAND:append = " ${EXTRA_OEMAKE}"
700
701EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure 691EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
702 692
703# kernel-base becomes kernel-${KERNEL_VERSION} 693# kernel-base becomes kernel-${KERNEL_VERSION}
@@ -715,9 +705,10 @@ RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})
715# not wanted in images as standard 705# not wanted in images as standard
716RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})" 706RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})"
717PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}" 707PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
708RPROVIDES:${KERNEL_PACKAGE_NAME}-image += "${KERNEL_PACKAGE_NAME}-image"
718RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}" 709RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}"
719PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}" 710PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
720RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}" 711RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME}-base"
721ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1" 712ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1"
722ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1" 713ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1"
723ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1" 714ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1"
@@ -851,9 +842,6 @@ kernel_do_deploy() {
851 842
852 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then 843 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
853 for imageType in ${KERNEL_IMAGETYPES} ; do 844 for imageType in ${KERNEL_IMAGETYPES} ; do
854 if [ "$imageType" = "fitImage" ] ; then
855 continue
856 fi
857 initramfsBaseName=$imageType-${INITRAMFS_NAME} 845 initramfsBaseName=$imageType-${INITRAMFS_NAME}
858 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT} 846 install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT}
859 if [ -n "${INITRAMFS_LINK_NAME}" ] ; then 847 if [ -n "${INITRAMFS_LINK_NAME}" ] ; then
diff --git a/meta/classes-recipe/kernelsrc.bbclass b/meta/classes-recipe/kernelsrc.bbclass
index ecb02dc9ed..9336184298 100644
--- a/meta/classes-recipe/kernelsrc.bbclass
+++ b/meta/classes-recipe/kernelsrc.bbclass
@@ -15,3 +15,7 @@ LOCAL_VERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}"
15 15
16inherit linux-kernel-base 16inherit linux-kernel-base
17 17
18# The final packages get the kernel version instead of the default 1.0
19python do_package:prepend() {
20 d.setVar('PKGV', d.getVar("KERNEL_VERSION").split("-")[0])
21}
diff --git a/meta/classes-recipe/license_image.bbclass b/meta/classes-recipe/license_image.bbclass
index 19b3dc55ba..d2c5ab902c 100644
--- a/meta/classes-recipe/license_image.bbclass
+++ b/meta/classes-recipe/license_image.bbclass
@@ -58,7 +58,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
58 import stat 58 import stat
59 59
60 bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split() 60 bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
61 bad_licenses = expand_wildcard_licenses(d, bad_licenses) 61 bad_licenses = oe.license.expand_wildcard_licenses(d, bad_licenses)
62 pkgarchs = d.getVar("SSTATE_ARCHS").split() 62 pkgarchs = d.getVar("SSTATE_ARCHS").split()
63 pkgarchs.reverse() 63 pkgarchs.reverse()
64 64
@@ -66,17 +66,17 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
66 with open(license_manifest, "w") as license_file: 66 with open(license_manifest, "w") as license_file:
67 for pkg in sorted(pkg_dic): 67 for pkg in sorted(pkg_dic):
68 remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions) 68 remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
69 incompatible_licenses = incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"]) 69 incompatible_licenses = oe.license.incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"])
70 if incompatible_licenses: 70 if incompatible_licenses:
71 bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses))) 71 bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses)))
72 else: 72 else:
73 incompatible_licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"]) 73 incompatible_licenses = oe.license.incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
74 if incompatible_licenses: 74 if incompatible_licenses:
75 oe.qa.handle_error('license-incompatible', "Including %s with incompatible license(s) %s into the image, because it has been allowed by exception list." %(pkg, ' '.join(incompatible_licenses)), d) 75 oe.qa.handle_error('license-exception', "Including %s with incompatible license(s) %s into the image, because it has been allowed by exception list." %(pkg, ' '.join(incompatible_licenses)), d)
76 try: 76 try:
77 (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \ 77 (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
78 oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"], 78 oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
79 remaining_bad_licenses, canonical_license, d) 79 remaining_bad_licenses, oe.license.canonical_license, d)
80 except oe.license.LicenseError as exc: 80 except oe.license.LicenseError as exc:
81 bb.fatal('%s: %s' % (d.getVar('P'), exc)) 81 bb.fatal('%s: %s' % (d.getVar('P'), exc))
82 82
@@ -144,7 +144,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
144 if not os.path.exists(pkg_license_dir ): 144 if not os.path.exists(pkg_license_dir ):
145 bb.fatal("Couldn't find license information for dependency %s" % pkg) 145 bb.fatal("Couldn't find license information for dependency %s" % pkg)
146 146
147 pkg_manifest_licenses = [canonical_license(d, lic) \ 147 pkg_manifest_licenses = [oe.license.canonical_license(d, lic) \
148 for lic in pkg_dic[pkg]["LICENSES"]] 148 for lic in pkg_dic[pkg]["LICENSES"]]
149 149
150 licenses = os.listdir(pkg_license_dir) 150 licenses = os.listdir(pkg_license_dir)
@@ -153,7 +153,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
153 pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic) 153 pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
154 154
155 if re.match(r"^generic_.*$", lic): 155 if re.match(r"^generic_.*$", lic):
156 generic_lic = canonical_license(d, 156 generic_lic = oe.license.canonical_license(d,
157 re.search(r"^generic_(.*)$", lic).group(1)) 157 re.search(r"^generic_(.*)$", lic).group(1))
158 158
159 # Do not copy generic license into package if isn't 159 # Do not copy generic license into package if isn't
@@ -176,7 +176,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
176 if not os.path.exists(pkg_rootfs_license): 176 if not os.path.exists(pkg_rootfs_license):
177 os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license) 177 os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license)
178 else: 178 else:
179 if (oe.license.license_ok(canonical_license(d, 179 if (oe.license.license_ok(oe.license.canonical_license(d,
180 lic), bad_licenses) == False or 180 lic), bad_licenses) == False or
181 os.path.exists(pkg_rootfs_license)): 181 os.path.exists(pkg_rootfs_license)):
182 continue 182 continue
diff --git a/meta/classes-recipe/linuxloader.bbclass b/meta/classes-recipe/linuxloader.bbclass
index 2ea1b62254..a2e8f9837b 100644
--- a/meta/classes-recipe/linuxloader.bbclass
+++ b/meta/classes-recipe/linuxloader.bbclass
@@ -57,7 +57,7 @@ def get_glibc_loader(d):
57 elif re.search("i.86", targetarch): 57 elif re.search("i.86", targetarch):
58 dynamic_loader = "${base_libdir}/ld-linux.so.2" 58 dynamic_loader = "${base_libdir}/ld-linux.so.2"
59 elif targetarch == "arm": 59 elif targetarch == "arm":
60 dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3" 60 dynamic_loader = "${base_libdir}/ld-linux${@['', '-armhf'][d.getVar('TARGET_FPU') == 'hard']}.so.3"
61 elif targetarch.startswith("aarch64"): 61 elif targetarch.startswith("aarch64"):
62 dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1" 62 dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
63 elif targetarch.startswith("riscv64"): 63 elif targetarch.startswith("riscv64"):
diff --git a/meta/classes-recipe/manpages.bbclass b/meta/classes-recipe/manpages.bbclass
index e9ca2f895b..f3d034b046 100644
--- a/meta/classes-recipe/manpages.bbclass
+++ b/meta/classes-recipe/manpages.bbclass
@@ -10,7 +10,7 @@
10# by default. 10# by default.
11PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}" 11PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
12 12
13inherit qemu 13PACKAGE_WRITE_DEPS += "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'qemuwrapper-cross', '', d)}"
14 14
15# usually manual files are packaged to ${PN}-doc except man-pages 15# usually manual files are packaged to ${PN}-doc except man-pages
16MAN_PKG ?= "${PN}-doc" 16MAN_PKG ?= "${PN}-doc"
diff --git a/meta/classes-recipe/meson.bbclass b/meta/classes-recipe/meson.bbclass
index 03fa2c06eb..c8b3e1ec29 100644
--- a/meta/classes-recipe/meson.bbclass
+++ b/meta/classes-recipe/meson.bbclass
@@ -9,7 +9,6 @@ inherit python3native meson-routines qemu
9DEPENDS:append = " meson-native ninja-native" 9DEPENDS:append = " meson-native ninja-native"
10 10
11EXEWRAPPER_ENABLED:class-native = "False" 11EXEWRAPPER_ENABLED:class-native = "False"
12EXEWRAPPER_ENABLED:class-nativesdk = "False"
13EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}" 12EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}"
14DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}" 13DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}"
15 14
@@ -23,6 +22,9 @@ MESON_SOURCEPATH = "${S}"
23# The target to build in do_compile. If unset the default targets are built. 22# The target to build in do_compile. If unset the default targets are built.
24MESON_TARGET ?= "" 23MESON_TARGET ?= ""
25 24
25# Since 0.60.0 you can specify custom tags to install
26MESON_INSTALL_TAGS ?= ""
27
26def noprefix(var, d): 28def noprefix(var, d):
27 return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1) 29 return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
28 30
@@ -60,6 +62,14 @@ def rust_tool(d, target_var):
60 cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split() 62 cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split()
61 return "rust = %s" % repr(cmd) 63 return "rust = %s" % repr(cmd)
62 64
65def bindgen_args(d):
66 args = '${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} --target=${TARGET_SYS}'
67 # For SDK packages TOOLCHAIN_OPTIONS don't contain full sysroot path
68 if bb.data.inherits_class("nativesdk", d):
69 args += ' --sysroot=${STAGING_DIR_HOST}${SDKPATHNATIVE}${prefix_nativesdk}'
70 items = d.expand(args).split()
71 return repr(items[0] if len(items) == 1 else items)
72
63addtask write_config before do_configure 73addtask write_config before do_configure
64do_write_config[vardeps] += "CC CXX AR NM STRIP READELF OBJCOPY CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS EXEWRAPPER_ENABLED" 74do_write_config[vardeps] += "CC CXX AR NM STRIP READELF OBJCOPY CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS EXEWRAPPER_ENABLED"
65do_write_config() { 75do_write_config() {
@@ -91,6 +101,7 @@ cpp_link_args = ${@meson_array('LDFLAGS', d)}
91[properties] 101[properties]
92needs_exe_wrapper = true 102needs_exe_wrapper = true
93sys_root = '${STAGING_DIR_HOST}' 103sys_root = '${STAGING_DIR_HOST}'
104bindgen_clang_arguments = ${@bindgen_args(d)}
94 105
95[host_machine] 106[host_machine]
96system = '${@meson_operating_system('HOST_OS', d)}' 107system = '${@meson_operating_system('HOST_OS', d)}'
@@ -127,7 +138,7 @@ cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
127EOF 138EOF
128} 139}
129 140
130do_write_config:append:class-target() { 141write_qemuwrapper() {
131 # Write out a qemu wrapper that will be used as exe_wrapper so that meson 142 # Write out a qemu wrapper that will be used as exe_wrapper so that meson
132 # can run target helper binaries through that. 143 # can run target helper binaries through that.
133 qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}" 144 qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
@@ -145,6 +156,14 @@ EOF
145 chmod +x ${WORKDIR}/meson-qemuwrapper 156 chmod +x ${WORKDIR}/meson-qemuwrapper
146} 157}
147 158
159do_write_config:append:class-target() {
160 write_qemuwrapper
161}
162
163do_write_config:append:class-nativesdk() {
164 write_qemuwrapper
165}
166
148# Tell externalsrc that changes to this file require a reconfigure 167# Tell externalsrc that changes to this file require a reconfigure
149CONFIGURE_FILES = "meson.build" 168CONFIGURE_FILES = "meson.build"
150 169
@@ -175,7 +194,10 @@ meson_do_compile() {
175} 194}
176 195
177meson_do_install() { 196meson_do_install() {
178 meson install --destdir ${D} --no-rebuild 197 if [ "x${MESON_INSTALL_TAGS}" != "x" ] ; then
198 meson_install_tags="--tags ${MESON_INSTALL_TAGS}"
199 fi
200 meson install --destdir ${D} --no-rebuild $meson_install_tags
179} 201}
180 202
181EXPORT_FUNCTIONS do_configure do_compile do_install 203EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes-recipe/module.bbclass b/meta/classes-recipe/module.bbclass
index f2f0b25a2d..4948e995c5 100644
--- a/meta/classes-recipe/module.bbclass
+++ b/meta/classes-recipe/module.bbclass
@@ -65,6 +65,7 @@ module_do_install() {
65 CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" \ 65 CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" \
66 STRIP="${KERNEL_STRIP}" \ 66 STRIP="${KERNEL_STRIP}" \
67 O=${STAGING_KERNEL_BUILDDIR} \ 67 O=${STAGING_KERNEL_BUILDDIR} \
68 KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \
68 ${MODULES_INSTALL_TARGET} 69 ${MODULES_INSTALL_TARGET}
69 70
70 if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then 71 if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then
diff --git a/meta/classes-recipe/multilib_script.bbclass b/meta/classes-recipe/multilib_script.bbclass
index e6f0249529..a7a08930b7 100644
--- a/meta/classes-recipe/multilib_script.bbclass
+++ b/meta/classes-recipe/multilib_script.bbclass
@@ -28,14 +28,12 @@ python () {
28 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d): 28 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
29 return 29 return
30 30
31 for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split(): 31 for entry in (d.getVar("MULTILIB_SCRIPTS") or "").split():
32 pkg, script = entry.split(":") 32 pkg, script = entry.split(":", 1)
33 epkg = d.expand(pkg) 33 scriptname = os.path.basename(script)
34 escript = d.expand(script) 34 d.appendVar("ALTERNATIVE:" + pkg, " " + scriptname + " ")
35 scriptname = os.path.basename(escript) 35 d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
36 d.appendVar("ALTERNATIVE:" + epkg, " " + scriptname + " ") 36 d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
37 d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, escript) 37 d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
38 d.setVarFlag("ALTERNATIVE_TARGET", scriptname, escript + "-${MULTILIB_SUFFIX}") 38 d.appendVar("FILES:" + pkg, " " + script + "-${MULTILIB_SUFFIX}")
39 d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + escript + " ${PKGD}" + escript + "-${MULTILIB_SUFFIX}")
40 d.appendVar("FILES:" + epkg, " " + escript + "-${MULTILIB_SUFFIX}")
41} 39}
diff --git a/meta/classes-recipe/native.bbclass b/meta/classes-recipe/native.bbclass
index 84a3ec65da..625975a694 100644
--- a/meta/classes-recipe/native.bbclass
+++ b/meta/classes-recipe/native.bbclass
@@ -40,11 +40,6 @@ HOST_CC_ARCH = "${BUILD_CC_ARCH}"
40HOST_LD_ARCH = "${BUILD_LD_ARCH}" 40HOST_LD_ARCH = "${BUILD_LD_ARCH}"
41HOST_AS_ARCH = "${BUILD_AS_ARCH}" 41HOST_AS_ARCH = "${BUILD_AS_ARCH}"
42 42
43CPPFLAGS = "${BUILD_CPPFLAGS}"
44CFLAGS = "${BUILD_CFLAGS}"
45CXXFLAGS = "${BUILD_CXXFLAGS}"
46LDFLAGS = "${BUILD_LDFLAGS}"
47
48STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}" 43STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
49STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}" 44STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
50 45
@@ -58,17 +53,20 @@ PTEST_ENABLED = "0"
58export CONFIG_SITE = "${COREBASE}/meta/site/native" 53export CONFIG_SITE = "${COREBASE}/meta/site/native"
59 54
60# set the compiler as well. It could have been set to something else 55# set the compiler as well. It could have been set to something else
61export CC = "${BUILD_CC}" 56CC = "${BUILD_CC}"
62export CXX = "${BUILD_CXX}" 57CXX = "${BUILD_CXX}"
63export FC = "${BUILD_FC}" 58FC = "${BUILD_FC}"
64export CPP = "${BUILD_CPP}" 59CPP = "${BUILD_CPP}"
65export LD = "${BUILD_LD}" 60LD = "${BUILD_LD}"
66export CCLD = "${BUILD_CCLD}" 61CCLD = "${BUILD_CCLD}"
67export AR = "${BUILD_AR}" 62AR = "${BUILD_AR}"
68export AS = "${BUILD_AS}" 63AS = "${BUILD_AS}"
69export RANLIB = "${BUILD_RANLIB}" 64RANLIB = "${BUILD_RANLIB}"
70export STRIP = "${BUILD_STRIP}" 65STRIP = "${BUILD_STRIP}"
71export NM = "${BUILD_NM}" 66NM = "${BUILD_NM}"
67OBJCOPY = "${BUILD_OBJCOPY}"
68OBJDUMP = "${BUILD_OBJDUMP}"
69READELF = "${BUILD_READELF}"
72 70
73# Path prefixes 71# Path prefixes
74base_prefix = "${STAGING_DIR_NATIVE}" 72base_prefix = "${STAGING_DIR_NATIVE}"
@@ -124,6 +122,7 @@ SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
124INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}" 122INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
125 123
126python native_virtclass_handler () { 124python native_virtclass_handler () {
125 import re
127 pn = e.data.getVar("PN") 126 pn = e.data.getVar("PN")
128 if not pn.endswith("-native"): 127 if not pn.endswith("-native"):
129 return 128 return
@@ -163,10 +162,20 @@ python native_virtclass_handler () {
163 newdeps.append(dep.replace(pn, bpn) + "-native") 162 newdeps.append(dep.replace(pn, bpn) + "-native")
164 else: 163 else:
165 newdeps.append(dep) 164 newdeps.append(dep)
166 d.setVar(varname, " ".join(newdeps)) 165 output_varname = varname
166 # Handle ${PN}-xxx -> ${BPN}-xxx-native
167 if suffix != "${PN}" and "${PN}" in suffix:
168 output_varname = varname.replace("${PN}", "${BPN}") + "-native"
169 d.renameVar(varname, output_varname)
170 d.setVar(output_varname, " ".join(newdeps))
167 171
168 map_dependencies("DEPENDS", e.data, selfref=False) 172 map_dependencies("DEPENDS", e.data, selfref=False)
169 for pkg in e.data.getVar("PACKAGES", False).split(): 173 # We need to handle things like ${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}
174 # and not pass ${PN}-test since in the native case it would be ignored. This does mean we ignore
175 # anonymous python derived PACKAGES entries.
176 for pkg in re.split(r"\${@(?:{.*?}|.)+?}|\s", d.getVar("PACKAGES", False)):
177 if not pkg:
178 continue
170 map_dependencies("RDEPENDS", e.data, pkg) 179 map_dependencies("RDEPENDS", e.data, pkg)
171 map_dependencies("RRECOMMENDS", e.data, pkg) 180 map_dependencies("RRECOMMENDS", e.data, pkg)
172 map_dependencies("RSUGGESTS", e.data, pkg) 181 map_dependencies("RSUGGESTS", e.data, pkg)
diff --git a/meta/classes-recipe/nativesdk.bbclass b/meta/classes-recipe/nativesdk.bbclass
index de6debda93..7ecb4c12c1 100644
--- a/meta/classes-recipe/nativesdk.bbclass
+++ b/meta/classes-recipe/nativesdk.bbclass
@@ -32,6 +32,7 @@ RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot"
32# 32#
33PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}" 33PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
34PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}" 34PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
35TUNE_PKGARCH = "${SDK_ARCH}"
35 36
36# 37#
37# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit 38# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
@@ -66,11 +67,6 @@ TARGET_FPU = ""
66EXTRA_OECONF_GCC_FLOAT = "" 67EXTRA_OECONF_GCC_FLOAT = ""
67TUNE_FEATURES = "" 68TUNE_FEATURES = ""
68 69
69CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
70CFLAGS = "${BUILDSDK_CFLAGS}"
71CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
72LDFLAGS = "${BUILDSDK_LDFLAGS}"
73
74# Change to place files in SDKPATH 70# Change to place files in SDKPATH
75base_prefix = "${SDKPATHNATIVE}" 71base_prefix = "${SDKPATHNATIVE}"
76prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" 72prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
@@ -108,6 +104,7 @@ python () {
108 clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split()) 104 clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
109 105
110 clsextend.map_depends_variable("DEPENDS") 106 clsextend.map_depends_variable("DEPENDS")
107 clsextend.map_depends_variable("PACKAGE_WRITE_DEPS")
111 clsextend.map_packagevars() 108 clsextend.map_packagevars()
112 clsextend.map_variable("PROVIDES") 109 clsextend.map_variable("PROVIDES")
113 clsextend.map_regexp_variable("PACKAGES_DYNAMIC") 110 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
diff --git a/meta/classes-recipe/nospdx.bbclass b/meta/classes-recipe/nospdx.bbclass
new file mode 100644
index 0000000000..b20e28218b
--- /dev/null
+++ b/meta/classes-recipe/nospdx.bbclass
@@ -0,0 +1,13 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7deltask do_collect_spdx_deps
8deltask do_create_spdx
9deltask do_create_spdx_runtime
10deltask do_create_package_spdx
11deltask do_create_rootfs_spdx
12deltask do_create_image_spdx
13deltask do_create_image_sbom
diff --git a/meta/classes-recipe/npm.bbclass b/meta/classes-recipe/npm.bbclass
index 91da3295f2..344e8b4bec 100644
--- a/meta/classes-recipe/npm.bbclass
+++ b/meta/classes-recipe/npm.bbclass
@@ -72,8 +72,10 @@ def npm_pack(env, srcdir, workdir):
72 j = json.load(f) 72 j = json.load(f)
73 73
74 # base does not really matter and is for documentation purposes 74 # base does not really matter and is for documentation purposes
75 # only. But the 'version' part must exist because other parts of 75 # only. But the 'version' part must exist because other parts of
76 # the bbclass rely on it. 76 # the bbclass rely on it.
77 if 'version' not in j:
78 j['version'] = '0.0.0-unknown'
77 base = j['name'].split('/')[-1] 79 base = j['name'].split('/')[-1]
78 tarball = os.path.join(workdir, "%s-%s.tgz" % (base, j['version'])); 80 tarball = os.path.join(workdir, "%s-%s.tgz" % (base, j['version']));
79 81
@@ -152,6 +154,9 @@ python npm_do_configure() {
152 has_shrinkwrap_file = False 154 has_shrinkwrap_file = False
153 155
154 if has_shrinkwrap_file: 156 if has_shrinkwrap_file:
157 if int(orig_shrinkwrap.get("lockfileVersion", 0)) < 2:
158 bb.fatal("%s: lockfileVersion version 2 or later is required" % orig_shrinkwrap_file)
159
155 cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap) 160 cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
156 for package in orig_shrinkwrap["packages"]: 161 for package in orig_shrinkwrap["packages"]:
157 if package != "": 162 if package != "":
diff --git a/meta/classes-recipe/pixbufcache.bbclass b/meta/classes-recipe/pixbufcache.bbclass
index 107e38885e..c32673df55 100644
--- a/meta/classes-recipe/pixbufcache.bbclass
+++ b/meta/classes-recipe/pixbufcache.bbclass
@@ -9,12 +9,9 @@
9# packages. 9# packages.
10# 10#
11 11
12DEPENDS:append:class-target = " qemu-native"
13inherit qemu
14
15PIXBUF_PACKAGES ??= "${PN}" 12PIXBUF_PACKAGES ??= "${PN}"
16 13
17PACKAGE_WRITE_DEPS += "qemu-native gdk-pixbuf-native" 14PACKAGE_WRITE_DEPS += "qemuwrapper-cross gdk-pixbuf-native"
18 15
19pixbufcache_common() { 16pixbufcache_common() {
20if [ "x$D" != "x" ]; then 17if [ "x$D" != "x" ]; then
diff --git a/meta/classes-recipe/populate_sdk_base.bbclass b/meta/classes-recipe/populate_sdk_base.bbclass
index 81896d808f..e6685cde97 100644
--- a/meta/classes-recipe/populate_sdk_base.bbclass
+++ b/meta/classes-recipe/populate_sdk_base.bbclass
@@ -4,9 +4,16 @@
4# SPDX-License-Identifier: MIT 4# SPDX-License-Identifier: MIT
5# 5#
6 6
7SDK_CLASSES += "${@bb.utils.contains("IMAGE_CLASSES", "testimage", "testsdk", "", d)}"
8inherit_defer ${SDK_CLASSES}
9
7PACKAGES = "" 10PACKAGES = ""
8 11
9inherit image-postinst-intercepts image-artifact-names 12# This exists as an optimization for SPDX processing to only run in image and
13# SDK processing context. This class happens to be common to these usages.
14SPDX_MULTILIB_SSTATE_ARCHS = "${@all_multilib_tune_values(d, 'SSTATE_ARCHS')}"
15
16inherit image-postinst-intercepts image-artifact-names nopackages
10 17
11# Wildcards specifying complementary packages to install for every package that has been explicitly 18# Wildcards specifying complementary packages to install for every package that has been explicitly
12# installed into the rootfs 19# installed into the rootfs
@@ -45,6 +52,8 @@ SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
45 52
46SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk" 53SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
47 54
55PSEUDO_INCLUDE_PATHS .= ",${SDK_DIR}"
56
48B:task-populate-sdk = "${SDK_DIR}" 57B:task-populate-sdk = "${SDK_DIR}"
49 58
50SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}" 59SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
@@ -75,7 +84,9 @@ SDK_ARCHIVE_TYPE ?= "tar.xz"
75SDK_XZ_COMPRESSION_LEVEL ?= "-9" 84SDK_XZ_COMPRESSION_LEVEL ?= "-9"
76SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}" 85SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
77SDK_ZIP_OPTIONS ?= "-y" 86SDK_ZIP_OPTIONS ?= "-y"
78 87SDK_7ZIP_OPTIONS ?= "-mx=9 -mm=BZip2"
88SDK_7ZIP_TYPE ?= "7z"
89SDK_ZSTD_COMPRESSION_LEVEL = "-17"
79 90
80# To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz 91# To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz
81python () { 92python () {
@@ -84,13 +95,23 @@ python () {
84 # SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR} 95 # SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
85 # recommand to cd into input dir first to avoid archive with buildpath 96 # recommand to cd into input dir first to avoid archive with buildpath
86 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .') 97 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
87 else: 98 elif d.getVar('SDK_ARCHIVE_TYPE') == '7zip':
99 d.setVar('SDK_ARCHIVE_DEPENDS', '7zip-native')
100 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; 7za a -r ${SDK_7ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_7ZIP_TYPE} .')
101 elif d.getVar('SDK_ARCHIVE_TYPE') == 'tar.zst':
102 d.setVar('SDK_ARCHIVE_DEPENDS', 'zstd-native')
103 d.setVar('SDK_ARCHIVE_CMD',
104 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | zstd -f -k -T0 -c ${SDK_ZSTD_COMPRESSION_LEVEL} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
105 elif d.getVar('SDK_ARCHIVE_TYPE') == 'tar.xz':
88 d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native') 106 d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
89 d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}') 107 d.setVar('SDK_ARCHIVE_CMD',
108 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
109 else:
110 bb.fatal("Invalid SDK_ARCHIVE_TYPE: %s, the supported SDK archive types are: zip, 7z, tar.xz, tar.zst" % d.getVar('SDK_ARCHIVE_TYPE'))
90} 111}
91 112
92SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}" 113SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
93SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross" 114SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native"
94PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:" 115PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
95SDK_DEPENDS += "nativesdk-glibc-locale" 116SDK_DEPENDS += "nativesdk-glibc-locale"
96 117
@@ -156,6 +177,33 @@ POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data"
156POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest sdk_prune_dirs" 177POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest sdk_prune_dirs"
157POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest" 178POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest"
158 179
180# Prepare the root links to point to the /usr counterparts.
181create_merged_usr_symlinks() {
182 root="$1"
183 install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
184 ln -rs $root${base_bindir} $root/bin
185 ln -rs $root${base_sbindir} $root/sbin
186 ln -rs $root${base_libdir} $root/${baselib}
187
188 if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
189 install -d $root${nonarch_base_libdir}
190 ln -rs $root${nonarch_base_libdir} $root/lib
191 fi
192
193 # create base links for multilibs
194 multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
195 for d in $multi_libdirs; do
196 install -d $root${exec_prefix}/$d
197 ln -rs $root${exec_prefix}/$d $root/$d
198 done
199}
200
201create_merged_usr_symlinks_sdk() {
202 create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
203}
204
205POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk', '',d)}"
206
159SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC}' if '${SDK_PACKAGING_FUNC}' else ''}" 207SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC}' if '${SDK_PACKAGING_FUNC}' else ''}"
160SDK_POSTPROCESS_COMMAND = "create_sdk_files check_sdk_sysroots archive_sdk ${SDK_PACKAGING_COMMAND}" 208SDK_POSTPROCESS_COMMAND = "create_sdk_files check_sdk_sysroots archive_sdk ${SDK_PACKAGING_COMMAND}"
161 209
@@ -217,8 +265,6 @@ python do_populate_sdk_setscene () {
217} 265}
218addtask do_populate_sdk_setscene 266addtask do_populate_sdk_setscene
219 267
220PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
221
222fakeroot create_sdk_files() { 268fakeroot create_sdk_files() {
223 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/ 269 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
224 270
@@ -327,7 +373,6 @@ EOF
327 -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \ 373 -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
328 -e '/@SDK_PRE_INSTALL_COMMAND@/d' \ 374 -e '/@SDK_PRE_INSTALL_COMMAND@/d' \
329 -e '/@SDK_POST_INSTALL_COMMAND@/d' \ 375 -e '/@SDK_POST_INSTALL_COMMAND@/d' \
330 -e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
331 -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \ 376 -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
332 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh 377 ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
333 378
@@ -381,6 +426,6 @@ do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
381do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}" 426do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
382do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}" 427do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
383do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}" 428do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
384do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb" 429do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb do_package_qa"
385do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}" 430do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
386addtask populate_sdk 431addtask populate_sdk
diff --git a/meta/classes-recipe/populate_sdk_ext.bbclass b/meta/classes-recipe/populate_sdk_ext.bbclass
index 09d5e2aeb6..20dfdf02d4 100644
--- a/meta/classes-recipe/populate_sdk_ext.bbclass
+++ b/meta/classes-recipe/populate_sdk_ext.bbclass
@@ -40,7 +40,7 @@ ESDK_LOCALCONF_REMOVE ?= "CONF_VERSION \
40 TMPDIR \ 40 TMPDIR \
41 BB_SERVER_TIMEOUT \ 41 BB_SERVER_TIMEOUT \
42 " 42 "
43ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory icecc" 43ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory"
44SDK_UPDATE_URL ?= "" 44SDK_UPDATE_URL ?= ""
45 45
46SDK_TARGETS ?= "${PN}" 46SDK_TARGETS ?= "${PN}"
@@ -150,10 +150,8 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
150 with open(sdkbasepath + '/conf/local.conf', 'a') as f: 150 with open(sdkbasepath + '/conf/local.conf', 'a') as f:
151 # Force the use of sstate from the build system 151 # Force the use of sstate from the build system
152 f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR')) 152 f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
153 f.write('SSTATE_MIRRORS:forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
154 # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it 153 # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
155 f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n') 154 f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n')
156 f.write('TCLIBCAPPEND:forcevariable = ""\n')
157 # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will 155 # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
158 # be different and we won't be able to find our native sstate) 156 # be different and we won't be able to find our native sstate)
159 if not bb.data.inherits_class('uninative', d): 157 if not bb.data.inherits_class('uninative', d):
@@ -290,6 +288,8 @@ def copy_uninative(d, baseoutpath):
290 return uninative_checksum 288 return uninative_checksum
291 289
292def write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_checksum): 290def write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_checksum):
291 import shutil
292
293 #check if custom templateconf path is set 293 #check if custom templateconf path is set
294 use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF') 294 use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
295 295
@@ -340,7 +340,6 @@ def write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_che
340 f.write('\n') 340 f.write('\n')
341 341
342 f.write('TMPDIR = "${TOPDIR}/tmp"\n') 342 f.write('TMPDIR = "${TOPDIR}/tmp"\n')
343 f.write('TCLIBCAPPEND = ""\n')
344 f.write('DL_DIR = "${TOPDIR}/downloads"\n') 343 f.write('DL_DIR = "${TOPDIR}/downloads"\n')
345 344
346 if bb.data.inherits_class('uninative', d): 345 if bb.data.inherits_class('uninative', d):
@@ -380,9 +379,6 @@ def write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_che
380 f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n') 379 f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
381 f.write('WITHIN_EXT_SDK = "1"\n\n') 380 f.write('WITHIN_EXT_SDK = "1"\n\n')
382 381
383 # Map gcc-dependent uninative sstate cache for installer usage
384 f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
385
386 if d.getVar("PRSERV_HOST"): 382 if d.getVar("PRSERV_HOST"):
387 # Override this, we now include PR data, so it should only point ot the local database 383 # Override this, we now include PR data, so it should only point ot the local database
388 f.write('PRSERV_HOST = "localhost:0"\n\n') 384 f.write('PRSERV_HOST = "localhost:0"\n\n')
@@ -413,10 +409,6 @@ def write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_che
413 if os.path.exists(builddir + dest_stub): 409 if os.path.exists(builddir + dest_stub):
414 shutil.copyfile(builddir + dest_stub, baseoutpath + dest_stub) 410 shutil.copyfile(builddir + dest_stub, baseoutpath + dest_stub)
415 411
416 cachedir = os.path.join(baseoutpath, 'cache')
417 bb.utils.mkdirhier(cachedir)
418 bb.parse.siggen.copy_unitaskhashes(cachedir)
419
420 # If PR Service is in use, we need to export this as well 412 # If PR Service is in use, we need to export this as well
421 bb.note('Do we have a pr database?') 413 bb.note('Do we have a pr database?')
422 if d.getVar("PRSERV_HOST"): 414 if d.getVar("PRSERV_HOST"):
@@ -495,8 +487,8 @@ def prepare_locked_cache(d, baseoutpath, derivative, conf_initpath):
495 sstate_out = baseoutpath + '/sstate-cache' 487 sstate_out = baseoutpath + '/sstate-cache'
496 bb.utils.remove(sstate_out, True) 488 bb.utils.remove(sstate_out, True)
497 489
498 # uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d) 490 # uninative.bbclass sets NATIVELSBSTRING to 'universal'
499 fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d) if bb.data.inherits_class('uninative', d) else "" 491 fixedlsbstring = "universal" if bb.data.inherits_class('uninative', d) else ""
500 492
501 sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1') 493 sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
502 sdk_ext_type = d.getVar('SDK_EXT_TYPE') 494 sdk_ext_type = d.getVar('SDK_EXT_TYPE')
@@ -507,10 +499,6 @@ def prepare_locked_cache(d, baseoutpath, derivative, conf_initpath):
507 else: 499 else:
508 tasklistfn = None 500 tasklistfn = None
509 501
510 cachedir = os.path.join(baseoutpath, 'cache')
511 bb.utils.mkdirhier(cachedir)
512 bb.parse.siggen.copy_unitaskhashes(cachedir)
513
514 # Add packagedata if enabled 502 # Add packagedata if enabled
515 if d.getVar('SDK_INCLUDE_PKGDATA') == '1': 503 if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
516 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc' 504 lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
@@ -732,7 +720,7 @@ sdk_ext_postinst() {
732 echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script 720 echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script
733 echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script 721 echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script
734 echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script 722 echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
735 echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script 723 echo "export OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script
736 fi 724 fi
737 725
738 # Allow bitbake environment setup to be ran as part of this sdk. 726 # Allow bitbake environment setup to be ran as part of this sdk.
@@ -776,7 +764,7 @@ fakeroot python do_populate_sdk_ext() {
776 764
777 # FIXME hopefully we can remove this restriction at some point, but the eSDK 765 # FIXME hopefully we can remove this restriction at some point, but the eSDK
778 # can only be built for the primary (default) multiconfig 766 # can only be built for the primary (default) multiconfig
779 if d.getVar('BB_CURRENT_MC') != 'default': 767 if d.getVar('BB_CURRENT_MC') != '':
780 bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC')) 768 bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC'))
781 769
782 # eSDK dependencies don't use the traditional variables and things don't work properly if they are set 770 # eSDK dependencies don't use the traditional variables and things don't work properly if they are set
diff --git a/meta/classes-recipe/ptest-cargo.bbclass b/meta/classes-recipe/ptest-cargo.bbclass
index c46df362bf..ece25ff1eb 100644
--- a/meta/classes-recipe/ptest-cargo.bbclass
+++ b/meta/classes-recipe/ptest-cargo.bbclass
@@ -12,16 +12,17 @@ python do_compile_ptest_cargo() {
12 import subprocess 12 import subprocess
13 import json 13 import json
14 14
15 cargo = bb.utils.which(d.getVar("PATH"), d.getVar("CARGO", True)) 15 cargo = bb.utils.which(d.getVar("PATH"), d.getVar("CARGO"))
16 cargo_build_flags = d.getVar("CARGO_BUILD_FLAGS", True) 16 cargo_build_flags = d.getVar("CARGO_BUILD_FLAGS")
17 rust_flags = d.getVar("RUSTFLAGS", True) 17 packageconfig_confargs = d.getVar("PACKAGECONFIG_CONFARGS")
18 manifest_path = d.getVar("CARGO_MANIFEST_PATH", True) 18 rust_flags = d.getVar("RUSTFLAGS")
19 manifest_path = d.getVar("CARGO_MANIFEST_PATH")
19 project_manifest_path = os.path.normpath(manifest_path) 20 project_manifest_path = os.path.normpath(manifest_path)
20 manifest_dir = os.path.dirname(manifest_path) 21 manifest_dir = os.path.dirname(manifest_path)
21 22
22 env = os.environ.copy() 23 env = os.environ.copy()
23 env['RUSTFLAGS'] = rust_flags 24 env['RUSTFLAGS'] = rust_flags
24 cmd = f"{cargo} build --tests --message-format json {cargo_build_flags}" 25 cmd = f"{cargo} build --tests --message-format json {cargo_build_flags} {packageconfig_confargs}"
25 bb.note(f"Building tests with cargo ({cmd})") 26 bb.note(f"Building tests with cargo ({cmd})")
26 27
27 try: 28 try:
@@ -66,7 +67,7 @@ python do_compile_ptest_cargo() {
66 if not test_bins: 67 if not test_bins:
67 bb.fatal("Unable to find any test binaries") 68 bb.fatal("Unable to find any test binaries")
68 69
69 cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES', True) 70 cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES')
70 bb.note(f"Found {len(test_bins)} tests, write their paths into {cargo_test_binaries_file}") 71 bb.note(f"Found {len(test_bins)} tests, write their paths into {cargo_test_binaries_file}")
71 with open(cargo_test_binaries_file, "w") as f: 72 with open(cargo_test_binaries_file, "w") as f:
72 for test_bin in test_bins: 73 for test_bin in test_bins:
@@ -76,11 +77,12 @@ python do_compile_ptest_cargo() {
76 77
77python do_install_ptest_cargo() { 78python do_install_ptest_cargo() {
78 import shutil 79 import shutil
80 import textwrap
79 81
80 dest_dir = d.getVar("D", True) 82 dest_dir = d.getVar("D")
81 pn = d.getVar("PN", True) 83 pn = d.getVar("PN")
82 ptest_path = d.getVar("PTEST_PATH", True) 84 ptest_path = d.getVar("PTEST_PATH")
83 cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES', True) 85 cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES')
84 rust_test_args = d.getVar('RUST_TEST_ARGS') or "" 86 rust_test_args = d.getVar('RUST_TEST_ARGS') or ""
85 87
86 ptest_dir = os.path.join(dest_dir, ptest_path.lstrip('/')) 88 ptest_dir = os.path.join(dest_dir, ptest_path.lstrip('/'))
@@ -97,17 +99,29 @@ python do_install_ptest_cargo() {
97 test_paths.append(os.path.join(ptest_path, os.path.basename(test_bin))) 99 test_paths.append(os.path.join(ptest_path, os.path.basename(test_bin)))
98 100
99 ptest_script = os.path.join(ptest_dir, "run-ptest") 101 ptest_script = os.path.join(ptest_dir, "run-ptest")
100 if os.path.exists(ptest_script): 102 script_exists = os.path.exists(ptest_script)
101 with open(ptest_script, "a") as f: 103 with open(ptest_script, "a") as f:
102 f.write(f"\necho \"\"\n") 104 if not script_exists:
103 f.write(f"echo \"## starting to run rust tests ##\"\n")
104 for test_path in test_paths:
105 f.write(f"{test_path} {rust_test_args}\n")
106 else:
107 with open(ptest_script, "a") as f:
108 f.write("#!/bin/sh\n") 105 f.write("#!/bin/sh\n")
109 for test_path in test_paths: 106 f.write("rc=0\n")
110 f.write(f"{test_path} {rust_test_args}\n") 107 else:
108 f.write(f"\necho \"\"\n")
109 f.write(f"echo \"## starting to run rust tests ##\"\n")
110 for test_path in test_paths:
111 script = textwrap.dedent(f"""\
112 if ! {test_path} {rust_test_args}
113 then
114 rc=1
115 echo "FAIL: {test_path}"
116 else
117 echo "PASS: {test_path}"
118 fi
119 """)
120 f.write(script)
121
122 f.write("exit $rc\n")
123
124 if not script_exists:
111 os.chmod(ptest_script, 0o755) 125 os.chmod(ptest_script, 0o755)
112 126
113 # this is chown -R root:root ${D}${PTEST_PATH} 127 # this is chown -R root:root ${D}${PTEST_PATH}
diff --git a/meta/classes-recipe/ptest-perl.bbclass b/meta/classes-recipe/ptest-perl.bbclass
index c283fdd1fc..a4a9d40d52 100644
--- a/meta/classes-recipe/ptest-perl.bbclass
+++ b/meta/classes-recipe/ptest-perl.bbclass
@@ -13,7 +13,7 @@ SRC_URI += "file://ptest-perl/run-ptest"
13do_install_ptest_perl() { 13do_install_ptest_perl() {
14 install -d ${D}${PTEST_PATH} 14 install -d ${D}${PTEST_PATH}
15 if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then 15 if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
16 install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH} 16 install -m 0755 ${UNPACKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
17 fi 17 fi
18 cp -r ${B}/t ${D}${PTEST_PATH} 18 cp -r ${B}/t ${D}${PTEST_PATH}
19 chown -R root:root ${D}${PTEST_PATH} 19 chown -R root:root ${D}${PTEST_PATH}
diff --git a/meta/classes-recipe/ptest-python-pytest.bbclass b/meta/classes-recipe/ptest-python-pytest.bbclass
new file mode 100644
index 0000000000..a4615e12bf
--- /dev/null
+++ b/meta/classes-recipe/ptest-python-pytest.bbclass
@@ -0,0 +1,37 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit ptest
8
9# Overridable configuration for the directory within the source tree
10# containing the pytest files
11PTEST_PYTEST_DIR ?= "tests"
12
13do_install_ptest() {
14 # Check if the recipe provides its own version of run-ptest
15 # If nothing exists in the SRC_URI, dynamically create a
16 # run-test script of "last resort" that has the default
17 # pytest behavior.
18 #
19 # Users can override this behavior by simply including a
20 # custom script (run-ptest) in the source file list
21 if [ ! -f "${UNPACKDIR}/run-ptest" ]; then
22 cat > ${D}${PTEST_PATH}/run-ptest << EOF
23#!/bin/sh
24pytest --automake
25EOF
26 # Ensure the newly created script has the execute bit set
27 chmod 755 ${D}${PTEST_PATH}/run-ptest
28 fi
29 if [ -d "${S}/${PTEST_PYTEST_DIR}" ]; then
30 install -d ${D}${PTEST_PATH}/${PTEST_PYTEST_DIR}
31 cp -rf ${S}/${PTEST_PYTEST_DIR}/* ${D}${PTEST_PATH}/${PTEST_PYTEST_DIR}/
32 fi
33}
34
35FILES:${PN}-ptest:prepend = "${PTEST_PATH}/*"
36
37RDEPENDS:${PN}-ptest:prepend = "python3-pytest python3-unittest-automake-output "
diff --git a/meta/classes-recipe/ptest.bbclass b/meta/classes-recipe/ptest.bbclass
index 0941572f8f..64c4bb9788 100644
--- a/meta/classes-recipe/ptest.bbclass
+++ b/meta/classes-recipe/ptest.bbclass
@@ -131,7 +131,7 @@ python () {
131} 131}
132 132
133QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest" 133QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest"
134def package_qa_check_missing_ptest(pn, d, messages): 134def package_qa_check_missing_ptest(pn, d):
135 # This checks that ptest package is actually included 135 # This checks that ptest package is actually included
136 # in standard oe-core ptest images - only for oe-core recipes 136 # in standard oe-core ptest images - only for oe-core recipes
137 if not 'meta/recipes' in d.getVar('FILE') or not(d.getVar('PTEST_ENABLED') == "1"): 137 if not 'meta/recipes' in d.getVar('FILE') or not(d.getVar('PTEST_ENABLED') == "1"):
diff --git a/meta/classes-recipe/pypi.bbclass b/meta/classes-recipe/pypi.bbclass
index c6bbe8119a..eb30004a0f 100644
--- a/meta/classes-recipe/pypi.bbclass
+++ b/meta/classes-recipe/pypi.bbclass
@@ -28,16 +28,30 @@ def pypi_src_uri(d):
28 archive_downloadname = d.getVar('PYPI_ARCHIVE_NAME_PREFIX') + archive_name 28 archive_downloadname = d.getVar('PYPI_ARCHIVE_NAME_PREFIX') + archive_name
29 return 'https://files.pythonhosted.org/packages/source/%s/%s/%s;downloadfilename=%s' % (package[0], package, archive_name, archive_downloadname) 29 return 'https://files.pythonhosted.org/packages/source/%s/%s/%s;downloadfilename=%s' % (package[0], package, archive_name, archive_downloadname)
30 30
31def pypi_normalize(d):
32 """
33 Normalize the package names to match PEP625 (https://peps.python.org/pep-0625/).
34 For non-compliant packages, maintainers can set UPSTREAM_CHECK_PYPI_PACKAGE to override the normalization
35 """
36 import re
37 return re.sub(r"[-_.]+", "-", d.getVar('PYPI_PACKAGE')).lower()
38
31PYPI_SRC_URI ?= "${@pypi_src_uri(d)}" 39PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
32 40
33HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/" 41HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
34SECTION = "devel/python" 42SECTION = "devel/python"
35SRC_URI:prepend = "${PYPI_SRC_URI} " 43SRC_URI:prepend = "${PYPI_SRC_URI} "
36S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}" 44S = "${UNPACKDIR}/${PYPI_PACKAGE}-${PV}"
37 45
38# Replace any '_' characters in the pypi URI with '-'s to follow the PyPi website naming conventions 46# Replace any '_' characters in the pypi URI with '-'s to follow the PyPi website naming conventions
39UPSTREAM_CHECK_PYPI_PACKAGE ?= "${@d.getVar('PYPI_PACKAGE').replace('_', '-')}" 47UPSTREAM_CHECK_PYPI_PACKAGE ?= "${@pypi_normalize(d)}"
40UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${UPSTREAM_CHECK_PYPI_PACKAGE}/" 48
41UPSTREAM_CHECK_REGEX ?= "/${UPSTREAM_CHECK_PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/" 49# Use the simple repository API rather than the potentially unstable project URL
50# More information on the pypi API specification is available here:
51# https://packaging.python.org/en/latest/specifications/simple-repository-api/
52#
53# NOTE: All URLs for the simple API MUST request canonical normalized URLs per the spec
54UPSTREAM_CHECK_URI ?= "https://pypi.org/simple/${@pypi_normalize(d)}"
55UPSTREAM_CHECK_REGEX ?= "${UPSTREAM_CHECK_PYPI_PACKAGE}-(?P<pver>(\d+[\.\-_]*)+).(tar\.gz|tgz|zip|tar\.bz2)"
42 56
43CVE_PRODUCT ?= "python:${PYPI_PACKAGE}" 57CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
diff --git a/meta/classes-recipe/python3-dir.bbclass b/meta/classes-recipe/python3-dir.bbclass
index 3d07de99b8..0f4e7e7773 100644
--- a/meta/classes-recipe/python3-dir.bbclass
+++ b/meta/classes-recipe/python3-dir.bbclass
@@ -4,7 +4,7 @@
4# SPDX-License-Identifier: MIT 4# SPDX-License-Identifier: MIT
5# 5#
6 6
7PYTHON_BASEVERSION = "3.12" 7PYTHON_BASEVERSION = "3.13"
8PYTHON_ABI = "" 8PYTHON_ABI = ""
9PYTHON_DIR = "python${PYTHON_BASEVERSION}" 9PYTHON_DIR = "python${PYTHON_BASEVERSION}"
10PYTHON_PN = "python3" 10PYTHON_PN = "python3"
diff --git a/meta/classes-recipe/python3native.bbclass b/meta/classes-recipe/python3native.bbclass
index 654a002fdb..da1283d6b3 100644
--- a/meta/classes-recipe/python3native.bbclass
+++ b/meta/classes-recipe/python3native.bbclass
@@ -6,7 +6,7 @@
6 6
7inherit python3-dir 7inherit python3-dir
8 8
9PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3" 9PYTHON = "${STAGING_BINDIR_NATIVE}/python3-native/python3"
10EXTRANATIVEPATH += "python3-native" 10EXTRANATIVEPATH += "python3-native"
11DEPENDS:append = " python3-native " 11DEPENDS:append = " python3-native "
12 12
@@ -20,8 +20,8 @@ export STAGING_LIBDIR
20# find_package(PythonLibs REQUIRED) 20# find_package(PythonLibs REQUIRED)
21# which ends up using libs/includes from build host 21# which ends up using libs/includes from build host
22# Therefore pre-empt that effort 22# Therefore pre-empt that effort
23export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so" 23export PYTHON_LIBRARY = "${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
24export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}" 24export PYTHON_INCLUDE_DIR = "${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
25 25
26# suppress host user's site-packages dirs. 26# suppress host user's site-packages dirs.
27export PYTHONNOUSERSITE = "1" 27export PYTHONNOUSERSITE = "1"
diff --git a/meta/classes-recipe/python_flit_core.bbclass b/meta/classes-recipe/python_flit_core.bbclass
index a0b1feb70a..c5480654f3 100644
--- a/meta/classes-recipe/python_flit_core.bbclass
+++ b/meta/classes-recipe/python_flit_core.bbclass
@@ -6,7 +6,7 @@
6 6
7inherit python_pep517 python3native python3-dir setuptools3-base 7inherit python_pep517 python3native python3-dir setuptools3-base
8 8
9DEPENDS += "python3 python3-flit-core-native" 9DEPENDS += "python3-flit-core-native"
10 10
11python_flit_core_do_manual_build () { 11python_flit_core_do_manual_build () {
12 cd ${PEP517_SOURCE_PATH} 12 cd ${PEP517_SOURCE_PATH}
diff --git a/meta/classes-recipe/python_mesonpy.bbclass b/meta/classes-recipe/python_mesonpy.bbclass
index 131fa74bed..81c087c7c7 100644
--- a/meta/classes-recipe/python_mesonpy.bbclass
+++ b/meta/classes-recipe/python_mesonpy.bbclass
@@ -33,10 +33,6 @@ def mesonpy_get_args(d):
33 33
34PEP517_BUILD_OPTS = "-Cbuilddir='${B}' ${@mesonpy_get_args(d)}" 34PEP517_BUILD_OPTS = "-Cbuilddir='${B}' ${@mesonpy_get_args(d)}"
35 35
36# Python pyx -> c -> so build leaves absolute build paths in the code
37INSANE_SKIP:${PN} += "buildpaths"
38INSANE_SKIP:${PN}-src += "buildpaths"
39
40python_mesonpy_do_configure () { 36python_mesonpy_do_configure () {
41 python_pep517_do_configure 37 python_pep517_do_configure
42} 38}
diff --git a/meta/classes-recipe/python_pep517.bbclass b/meta/classes-recipe/python_pep517.bbclass
index c30674c8ec..e8cd1923ef 100644
--- a/meta/classes-recipe/python_pep517.bbclass
+++ b/meta/classes-recipe/python_pep517.bbclass
@@ -50,6 +50,8 @@ python_pep517_do_install () {
50 fi 50 fi
51 51
52 nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl 52 nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl
53
54 find ${D} -path *.dist-info/RECORD -delete
53} 55}
54 56
55# A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native. 57# A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native.
diff --git a/meta/classes-recipe/python_pyo3.bbclass b/meta/classes-recipe/python_pyo3.bbclass
index 9a32eac6fd..7f5a00f584 100644
--- a/meta/classes-recipe/python_pyo3.bbclass
+++ b/meta/classes-recipe/python_pyo3.bbclass
@@ -11,13 +11,13 @@
11 11
12inherit cargo python3-dir siteinfo 12inherit cargo python3-dir siteinfo
13 13
14export PYO3_CROSS="1" 14export PYO3_CROSS = "1"
15export PYO3_CROSS_PYTHON_VERSION="${PYTHON_BASEVERSION}" 15export PYO3_CROSS_PYTHON_VERSION = "${PYTHON_BASEVERSION}"
16export PYO3_CROSS_LIB_DIR="${STAGING_LIBDIR}" 16export PYO3_CROSS_LIB_DIR = "${STAGING_LIBDIR}"
17export CARGO_BUILD_TARGET="${RUST_HOST_SYS}" 17export CARGO_BUILD_TARGET = "${RUST_HOST_SYS}"
18export RUSTFLAGS 18export RUSTFLAGS
19export PYO3_PYTHON="${PYTHON}" 19export PYO3_PYTHON = "${PYTHON}"
20export PYO3_CONFIG_FILE="${WORKDIR}/pyo3.config" 20export PYO3_CONFIG_FILE = "${WORKDIR}/pyo3.config"
21 21
22python_pyo3_do_configure () { 22python_pyo3_do_configure () {
23 cat > ${WORKDIR}/pyo3.config << EOF 23 cat > ${WORKDIR}/pyo3.config << EOF
diff --git a/meta/classes-recipe/qemu.bbclass b/meta/classes-recipe/qemu.bbclass
index dbb5ee0b66..f83faf8049 100644
--- a/meta/classes-recipe/qemu.bbclass
+++ b/meta/classes-recipe/qemu.bbclass
@@ -10,48 +10,13 @@
10# 10#
11 11
12def qemu_target_binary(data): 12def qemu_target_binary(data):
13 package_arch = data.getVar("PACKAGE_ARCH") 13 return oe.qemu.qemu_target_binary(data)
14 qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch) or "")
15 if qemu_target_binary:
16 return qemu_target_binary
17
18 target_arch = data.getVar("TARGET_ARCH")
19 if target_arch in ("i486", "i586", "i686"):
20 target_arch = "i386"
21 elif target_arch == "powerpc":
22 target_arch = "ppc"
23 elif target_arch == "powerpc64":
24 target_arch = "ppc64"
25 elif target_arch == "powerpc64le":
26 target_arch = "ppc64le"
27
28 return "qemu-" + target_arch
29 14
30def qemu_wrapper_cmdline(data, rootfs_path, library_paths): 15def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
31 import string 16 return oe.qemu.qemu_wrapper_cmdline(data, rootfs_path, library_paths)
32
33 qemu_binary = qemu_target_binary(data)
34 if qemu_binary == "qemu-allarch":
35 qemu_binary = "qemuwrapper"
36
37 qemu_options = data.getVar("QEMU_OPTIONS") or ""
38
39 return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
40 + " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
41 17
42# Next function will return a string containing the command that is needed to
43# run a certain binary through qemu. For example, in order to make a certain
44# postinstall scriptlet run at do_rootfs time and running the postinstall is
45# architecture dependent, we can run it through qemu. For example, in the
46# postinstall scriptlet, we could use the following:
47#
48# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
49#
50def qemu_run_binary(data, rootfs_path, binary): 18def qemu_run_binary(data, rootfs_path, binary):
51 libdir = rootfs_path + data.getVar("libdir", False) 19 return oe.qemu.qemu_run_binary(data, rootfs_path, binary)
52 base_libdir = rootfs_path + data.getVar("base_libdir", False)
53
54 return qemu_wrapper_cmdline(data, rootfs_path, [libdir, base_libdir]) + rootfs_path + binary
55 20
56# QEMU_EXTRAOPTIONS is not meant to be directly used, the extensions are 21# QEMU_EXTRAOPTIONS is not meant to be directly used, the extensions are
57# PACKAGE_ARCH, *NOT* overrides. 22# PACKAGE_ARCH, *NOT* overrides.
@@ -59,19 +24,5 @@ def qemu_run_binary(data, rootfs_path, binary):
59# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do 24# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do
60# this dance). For others (e.g. arm) a -cpu option is not necessary, since the 25# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
61# qemu-arm default CPU supports all required architecture levels. 26# qemu-arm default CPU supports all required architecture levels.
62 27QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS:tune-%s" % d.getVar('TUNE_PKGARCH')) or ""}"
63QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')) or ""}" 28QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS:tune-${TUNE_PKGARCH}"
64QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
65
66QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
67QEMU_EXTRAOPTIONS_ppce500mc = " -cpu e500mc"
68QEMU_EXTRAOPTIONS_ppce5500 = " -cpu e500mc"
69QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
70QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
71QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
72QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
73QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER9"
74# Some packages e.g. fwupd sets PACKAGE_ARCH = MACHINE_ARCH and uses meson which
75# needs right options to usermode qemu
76QEMU_EXTRAOPTIONS_qemuppc = " -cpu 7400"
77QEMU_EXTRAOPTIONS_qemuppc64 = " -cpu POWER9"
diff --git a/meta/classes-recipe/qemuboot.bbclass b/meta/classes-recipe/qemuboot.bbclass
index 895fd38d68..0f80c60ab5 100644
--- a/meta/classes-recipe/qemuboot.bbclass
+++ b/meta/classes-recipe/qemuboot.bbclass
@@ -129,7 +129,8 @@ addtask do_write_qemuboot_conf after do_rootfs before do_image
129 129
130def qemuboot_vars(d): 130def qemuboot_vars(d):
131 build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE', 131 build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
132 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME', 132 'KERNEL_IMAGETYPE', 'KERNEL_IMAGE_NAME',
133 'KERNEL_IMAGE_BIN_EXT', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
133 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE', 134 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
134 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER'] 135 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER']
135 return build_vars + [k for k in d.keys() if k.startswith('QB_')] 136 return build_vars + [k for k in d.keys() if k.startswith('QB_')]
diff --git a/meta/classes-recipe/rootfs-postcommands.bbclass b/meta/classes-recipe/rootfs-postcommands.bbclass
index 920da94ba2..8b5822a0b5 100644
--- a/meta/classes-recipe/rootfs-postcommands.bbclass
+++ b/meta/classes-recipe/rootfs-postcommands.bbclass
@@ -4,20 +4,20 @@
4# SPDX-License-Identifier: MIT 4# SPDX-License-Identifier: MIT
5# 5#
6 6
7# Zap the root password if debug-tweaks and empty-root-password features are not enabled 7# Zap the root password if empty-root-password feature is not enabled
8ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ",d)}' 8ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "empty-root-password", "", "zap_empty_root_password ",d)}'
9 9
10# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled 10# Allow dropbear/openssh to accept logins from accounts with an empty password string if allow-empty-password is enabled
11ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password ", "",d)}' 11ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "allow-empty-password", "ssh_allow_empty_password ", "",d)}'
12 12
13# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled 13# Allow dropbear/openssh to accept root logins if allow-root-login is enabled
14ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login ", "",d)}' 14ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "allow-root-login", "ssh_allow_root_login ", "",d)}'
15 15
16# Autologin the root user on the serial console, if empty-root-password and serial-autologin-root are active 16# Autologin the root user on the serial console, if empty-root-password and serial-autologin-root are active
17ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", [ 'empty-root-password', 'serial-autologin-root' ], "serial_autologin_root ", "",d)}' 17ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", [ 'empty-root-password', 'serial-autologin-root' ], "serial_autologin_root ", "",d)}'
18 18
19# Enable postinst logging if debug-tweaks or post-install-logging is enabled 19# Enable postinst logging if post-install-logging is enabled
20ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging ", "",d)}' 20ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "post-install-logging", "postinst_enable_logging ", "",d)}'
21 21
22# Create /etc/timestamp during image construction to give a reasonably sane default time setting 22# Create /etc/timestamp during image construction to give a reasonably sane default time setting
23ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp " 23ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp "
@@ -43,7 +43,7 @@ ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest"
43POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log" 43POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
44# Set default target for systemd images 44# Set default target for systemd images
45SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}' 45SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}'
46ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target systemd_sysusers_check", "", d)}' 46ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target systemd_sysusers_check systemd_handle_machine_id", "", d)}'
47 47
48ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile' 48ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile'
49 49
@@ -173,6 +173,23 @@ python systemd_sysusers_check() {
173 check_group_exists(d, sid) 173 check_group_exists(d, sid)
174} 174}
175 175
176systemd_handle_machine_id() {
177 if ${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "true", "false", d)}; then
178 # Create machine-id
179 # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
180 touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
181 fi
182 # In order to be backward compatible with the previous OE-core specific (re)implementation of systemctl
183 # we need to touch machine-id when handling presets and when the rootfs is NOT stateless
184 if ${@ 'true' if not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else 'false'}; then
185 touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
186 if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
187 systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
188 systemctl --root="${IMAGE_ROOTFS}" --global --preset-mode=enable-only preset-all
189 fi
190 fi
191}
192
176# 193#
177# A hook function to support read-only-rootfs IMAGE_FEATURES 194# A hook function to support read-only-rootfs IMAGE_FEATURES
178# 195#
@@ -224,12 +241,6 @@ read_only_rootfs_hook () {
224 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh 241 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
225 fi 242 fi
226 fi 243 fi
227
228 if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
229 # Create machine-id
230 # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
231 touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
232 fi
233} 244}
234 245
235# 246#
@@ -308,19 +319,19 @@ serial_autologin_root () {
308} 319}
309 320
310python tidy_shadowutils_files () { 321python tidy_shadowutils_files () {
311 import rootfspostcommands 322 import oe.rootfspostcommands
312 rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}')) 323 oe.rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
313} 324}
314 325
315python sort_passwd () { 326python sort_passwd () {
316 """ 327 """
317 Deprecated in the favour of tidy_shadowutils_files. 328 Deprecated in the favour of tidy_shadowutils_files.
318 """ 329 """
319 import rootfspostcommands 330 import oe.rootfspostcommands
320 bb.warn('[sort_passwd] You are using a deprecated function for ' 331 bb.warn('[sort_passwd] You are using a deprecated function for '
321 'SORT_PASSWD_POSTPROCESS_COMMAND. The default one is now called ' 332 'SORT_PASSWD_POSTPROCESS_COMMAND. The default one is now called '
322 '"tidy_shadowutils_files".') 333 '"tidy_shadowutils_files".')
323 rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}')) 334 oe.rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
324} 335}
325 336
326# 337#
@@ -487,6 +498,10 @@ rootfs_reproducible () {
487 find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \ 498 find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
488 sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g' 499 sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
489 fi 500 fi
501
502 if [ -f ${IMAGE_ROOTFS}${localstatedir}/lib/opkg/status ]; then
503 sed -i 's/^Installed-Time: .*/Installed-Time: ${REPRODUCIBLE_TIMESTAMP_ROOTFS}/' ${IMAGE_ROOTFS}${localstatedir}/lib/opkg/status
504 fi
490 fi 505 fi
491} 506}
492 507
diff --git a/meta/classes-recipe/rust-common.bbclass b/meta/classes-recipe/rust-common.bbclass
index 6940093e59..31331c7a26 100644
--- a/meta/classes-recipe/rust-common.bbclass
+++ b/meta/classes-recipe/rust-common.bbclass
@@ -13,7 +13,7 @@ FILES:${PN} += "${rustlibdir}/*.so"
13FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta" 13FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta"
14FILES:${PN}-dbg += "${rustlibdir}/.debug" 14FILES:${PN}-dbg += "${rustlibdir}/.debug"
15 15
16RUSTLIB = "-L ${STAGING_DIR_HOST}${rustlibdir}" 16RUSTLIB ?= "-L ${STAGING_DIR_HOST}${rustlibdir}"
17RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=${TARGET_DBGSRC_DIR}" 17RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=${TARGET_DBGSRC_DIR}"
18RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}" 18RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}"
19RUSTLIB_DEP ??= "libstd-rs" 19RUSTLIB_DEP ??= "libstd-rs"
@@ -173,7 +173,7 @@ do_rust_create_wrappers () {
173 mkdir -p "${WRAPPER_DIR}" 173 mkdir -p "${WRAPPER_DIR}"
174 174
175 # Yocto Build / Rust Host C compiler 175 # Yocto Build / Rust Host C compiler
176 create_wrapper_rust "${RUST_BUILD_CC}" "" "${CRATE_CC_FLAGS}" "${BUILD_CC}" 176 create_wrapper_rust "${RUST_BUILD_CC}" "" "${CRATE_CC_FLAGS}" "${BUILD_CC}" "${BUILD_LDFLAGS}"
177 # Yocto Build / Rust Host C++ compiler 177 # Yocto Build / Rust Host C++ compiler
178 create_wrapper_rust "${RUST_BUILD_CXX}" "" "${CRATE_CC_FLAGS}" "${BUILD_CXX}" 178 create_wrapper_rust "${RUST_BUILD_CXX}" "" "${CRATE_CC_FLAGS}" "${BUILD_CXX}"
179 # Yocto Build / Rust Host linker 179 # Yocto Build / Rust Host linker
diff --git a/meta/classes-recipe/rust-target-config.bbclass b/meta/classes-recipe/rust-target-config.bbclass
index 330ad8a3f5..cac6e90a9e 100644
--- a/meta/classes-recipe/rust-target-config.bbclass
+++ b/meta/classes-recipe/rust-target-config.bbclass
@@ -77,8 +77,33 @@ def llvm_features_from_tune(d):
77 f.append("+a15") 77 f.append("+a15")
78 if 'cortexa17' in feat: 78 if 'cortexa17' in feat:
79 f.append("+a17") 79 f.append("+a17")
80 if ('riscv64' in feat) or ('riscv32' in feat): 80 if 'rv' in feat:
81 f.append("+a,+c,+d,+f,+m") 81 if 'm' in feat:
82 f.append("+m")
83 if 'a' in feat:
84 f.append("+a")
85 if 'f' in feat:
86 f.append("+f")
87 if 'd' in feat:
88 f.append("+d")
89 if 'c' in feat:
90 f.append("+c")
91 if 'v' in feat:
92 f.append("+v")
93 if 'zicbom' in feat:
94 f.append("+zicbom")
95 if 'zicsr' in feat:
96 f.append("+zicsr")
97 if 'zifencei' in feat:
98 f.append("+zifencei")
99 if 'zba' in feat:
100 f.append("+zba")
101 if 'zbb' in feat:
102 f.append("+zbb")
103 if 'zbc' in feat:
104 f.append("+zbc")
105 if 'zbs' in feat:
106 f.append("+zbs")
82 return f 107 return f
83llvm_features_from_tune[vardepvalue] = "${@llvm_features_from_tune(d)}" 108llvm_features_from_tune[vardepvalue] = "${@llvm_features_from_tune(d)}"
84 109
@@ -130,7 +155,7 @@ def llvm_features(d):
130llvm_features[vardepvalue] = "${@llvm_features(d)}" 155llvm_features[vardepvalue] = "${@llvm_features(d)}"
131 156
132## arm-unknown-linux-gnueabihf 157## arm-unknown-linux-gnueabihf
133DATA_LAYOUT[arm-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" 158DATA_LAYOUT[arm-eabi] = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
134TARGET_ENDIAN[arm-eabi] = "little" 159TARGET_ENDIAN[arm-eabi] = "little"
135TARGET_POINTER_WIDTH[arm-eabi] = "32" 160TARGET_POINTER_WIDTH[arm-eabi] = "32"
136TARGET_C_INT_WIDTH[arm-eabi] = "32" 161TARGET_C_INT_WIDTH[arm-eabi] = "32"
@@ -138,7 +163,7 @@ MAX_ATOMIC_WIDTH[arm-eabi] = "64"
138FEATURES[arm-eabi] = "+v6,+vfp2" 163FEATURES[arm-eabi] = "+v6,+vfp2"
139 164
140## armv7-unknown-linux-gnueabihf 165## armv7-unknown-linux-gnueabihf
141DATA_LAYOUT[armv7-eabi] = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" 166DATA_LAYOUT[armv7-eabi] = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
142TARGET_ENDIAN[armv7-eabi] = "little" 167TARGET_ENDIAN[armv7-eabi] = "little"
143TARGET_POINTER_WIDTH[armv7-eabi] = "32" 168TARGET_POINTER_WIDTH[armv7-eabi] = "32"
144TARGET_C_INT_WIDTH[armv7-eabi] = "32" 169TARGET_C_INT_WIDTH[armv7-eabi] = "32"
@@ -146,35 +171,35 @@ MAX_ATOMIC_WIDTH[armv7-eabi] = "64"
146FEATURES[armv7-eabi] = "+v7,+vfp2,+thumb2" 171FEATURES[armv7-eabi] = "+v7,+vfp2,+thumb2"
147 172
148## aarch64-unknown-linux-{gnu, musl} 173## aarch64-unknown-linux-{gnu, musl}
149DATA_LAYOUT[aarch64] = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" 174DATA_LAYOUT[aarch64] = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
150TARGET_ENDIAN[aarch64] = "little" 175TARGET_ENDIAN[aarch64] = "little"
151TARGET_POINTER_WIDTH[aarch64] = "64" 176TARGET_POINTER_WIDTH[aarch64] = "64"
152TARGET_C_INT_WIDTH[aarch64] = "32" 177TARGET_C_INT_WIDTH[aarch64] = "32"
153MAX_ATOMIC_WIDTH[aarch64] = "128" 178MAX_ATOMIC_WIDTH[aarch64] = "128"
154 179
155## x86_64-unknown-linux-{gnu, musl} 180## x86_64-unknown-linux-{gnu, musl}
156DATA_LAYOUT[x86_64] = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" 181DATA_LAYOUT[x86_64] = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
157TARGET_ENDIAN[x86_64] = "little" 182TARGET_ENDIAN[x86_64] = "little"
158TARGET_POINTER_WIDTH[x86_64] = "64" 183TARGET_POINTER_WIDTH[x86_64] = "64"
159TARGET_C_INT_WIDTH[x86_64] = "32" 184TARGET_C_INT_WIDTH[x86_64] = "32"
160MAX_ATOMIC_WIDTH[x86_64] = "64" 185MAX_ATOMIC_WIDTH[x86_64] = "64"
161 186
162## x86_64-unknown-linux-gnux32 187## x86_64-unknown-linux-gnux32
163DATA_LAYOUT[x86_64-x32] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" 188DATA_LAYOUT[x86_64-x32] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
164TARGET_ENDIAN[x86_64-x32] = "little" 189TARGET_ENDIAN[x86_64-x32] = "little"
165TARGET_POINTER_WIDTH[x86_64-x32] = "32" 190TARGET_POINTER_WIDTH[x86_64-x32] = "32"
166TARGET_C_INT_WIDTH[x86_64-x32] = "32" 191TARGET_C_INT_WIDTH[x86_64-x32] = "32"
167MAX_ATOMIC_WIDTH[x86_64-x32] = "64" 192MAX_ATOMIC_WIDTH[x86_64-x32] = "64"
168 193
169## i686-unknown-linux-{gnu, musl} 194## i686-unknown-linux-{gnu, musl}
170DATA_LAYOUT[i686] = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" 195DATA_LAYOUT[i686] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128"
171TARGET_ENDIAN[i686] = "little" 196TARGET_ENDIAN[i686] = "little"
172TARGET_POINTER_WIDTH[i686] = "32" 197TARGET_POINTER_WIDTH[i686] = "32"
173TARGET_C_INT_WIDTH[i686] = "32" 198TARGET_C_INT_WIDTH[i686] = "32"
174MAX_ATOMIC_WIDTH[i686] = "64" 199MAX_ATOMIC_WIDTH[i686] = "64"
175 200
176## XXX: a bit of a hack so qemux86 builds, clone of i686-unknown-linux-{gnu, musl} above 201## XXX: a bit of a hack so qemux86 builds, clone of i686-unknown-linux-{gnu, musl} above
177DATA_LAYOUT[i586] = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" 202DATA_LAYOUT[i586] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128"
178TARGET_ENDIAN[i586] = "little" 203TARGET_ENDIAN[i586] = "little"
179TARGET_POINTER_WIDTH[i586] = "32" 204TARGET_POINTER_WIDTH[i586] = "32"
180TARGET_C_INT_WIDTH[i586] = "32" 205TARGET_C_INT_WIDTH[i586] = "32"
@@ -198,7 +223,7 @@ MAX_ATOMIC_WIDTH[mipsel] = "32"
198DATA_LAYOUT[mips64] = "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128" 223DATA_LAYOUT[mips64] = "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
199TARGET_ENDIAN[mips64] = "big" 224TARGET_ENDIAN[mips64] = "big"
200TARGET_POINTER_WIDTH[mips64] = "64" 225TARGET_POINTER_WIDTH[mips64] = "64"
201TARGET_C_INT_WIDTH[mips64] = "64" 226TARGET_C_INT_WIDTH[mips64] = "32"
202MAX_ATOMIC_WIDTH[mips64] = "64" 227MAX_ATOMIC_WIDTH[mips64] = "64"
203 228
204## mips64-n32-unknown-linux-{gnu, musl} 229## mips64-n32-unknown-linux-{gnu, musl}
@@ -212,46 +237,46 @@ MAX_ATOMIC_WIDTH[mips64-n32] = "64"
212DATA_LAYOUT[mips64el] = "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128" 237DATA_LAYOUT[mips64el] = "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
213TARGET_ENDIAN[mips64el] = "little" 238TARGET_ENDIAN[mips64el] = "little"
214TARGET_POINTER_WIDTH[mips64el] = "64" 239TARGET_POINTER_WIDTH[mips64el] = "64"
215TARGET_C_INT_WIDTH[mips64el] = "64" 240TARGET_C_INT_WIDTH[mips64el] = "32"
216MAX_ATOMIC_WIDTH[mips64el] = "64" 241MAX_ATOMIC_WIDTH[mips64el] = "64"
217 242
218## powerpc-unknown-linux-{gnu, musl} 243## powerpc-unknown-linux-{gnu, musl}
219DATA_LAYOUT[powerpc] = "E-m:e-p:32:32-i64:64-n32" 244DATA_LAYOUT[powerpc] = "E-m:e-p:32:32-Fn32-i64:64-n32"
220TARGET_ENDIAN[powerpc] = "big" 245TARGET_ENDIAN[powerpc] = "big"
221TARGET_POINTER_WIDTH[powerpc] = "32" 246TARGET_POINTER_WIDTH[powerpc] = "32"
222TARGET_C_INT_WIDTH[powerpc] = "32" 247TARGET_C_INT_WIDTH[powerpc] = "32"
223MAX_ATOMIC_WIDTH[powerpc] = "32" 248MAX_ATOMIC_WIDTH[powerpc] = "32"
224 249
225## powerpc64-unknown-linux-{gnu, musl} 250## powerpc64-unknown-linux-{gnu, musl}
226DATA_LAYOUT[powerpc64] = "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512" 251DATA_LAYOUT[powerpc64] = "E-m:e-Fi64-i64:64-n32:64-S128-v256:256:256-v512:512:512"
227TARGET_ENDIAN[powerpc64] = "big" 252TARGET_ENDIAN[powerpc64] = "big"
228TARGET_POINTER_WIDTH[powerpc64] = "64" 253TARGET_POINTER_WIDTH[powerpc64] = "64"
229TARGET_C_INT_WIDTH[powerpc64] = "64" 254TARGET_C_INT_WIDTH[powerpc64] = "32"
230MAX_ATOMIC_WIDTH[powerpc64] = "64" 255MAX_ATOMIC_WIDTH[powerpc64] = "64"
231 256
232## powerpc64le-unknown-linux-{gnu, musl} 257## powerpc64le-unknown-linux-{gnu, musl}
233DATA_LAYOUT[powerpc64le] = "e-m:e-i64:64-n32:64-v256:256:256-v512:512:512" 258DATA_LAYOUT[powerpc64le] = "e-m:e-Fn32-i64:64-n32:64-S128-v256:256:256-v512:512:512"
234TARGET_ENDIAN[powerpc64le] = "little" 259TARGET_ENDIAN[powerpc64le] = "little"
235TARGET_POINTER_WIDTH[powerpc64le] = "64" 260TARGET_POINTER_WIDTH[powerpc64le] = "64"
236TARGET_C_INT_WIDTH[powerpc64le] = "64" 261TARGET_C_INT_WIDTH[powerpc64le] = "32"
237MAX_ATOMIC_WIDTH[powerpc64le] = "64" 262MAX_ATOMIC_WIDTH[powerpc64le] = "64"
238 263
239## riscv32gc-unknown-linux-{gnu, musl} 264## riscv32-unknown-linux-{gnu, musl}
240DATA_LAYOUT[riscv32gc] = "e-m:e-p:32:32-i64:64-n32-S128" 265DATA_LAYOUT[riscv32] = "e-m:e-p:32:32-i64:64-n32-S128"
241TARGET_ENDIAN[riscv32gc] = "little" 266TARGET_ENDIAN[riscv32] = "little"
242TARGET_POINTER_WIDTH[riscv32gc] = "32" 267TARGET_POINTER_WIDTH[riscv32] = "32"
243TARGET_C_INT_WIDTH[riscv32gc] = "32" 268TARGET_C_INT_WIDTH[riscv32] = "32"
244MAX_ATOMIC_WIDTH[riscv32gc] = "32" 269MAX_ATOMIC_WIDTH[riscv32] = "32"
245 270
246## riscv64gc-unknown-linux-{gnu, musl} 271## riscv64-unknown-linux-{gnu, musl}
247DATA_LAYOUT[riscv64gc] = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" 272DATA_LAYOUT[riscv64] = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
248TARGET_ENDIAN[riscv64gc] = "little" 273TARGET_ENDIAN[riscv64] = "little"
249TARGET_POINTER_WIDTH[riscv64gc] = "64" 274TARGET_POINTER_WIDTH[riscv64] = "64"
250TARGET_C_INT_WIDTH[riscv64gc] = "64" 275TARGET_C_INT_WIDTH[riscv64] = "32"
251MAX_ATOMIC_WIDTH[riscv64gc] = "64" 276MAX_ATOMIC_WIDTH[riscv64] = "64"
252 277
253## loongarch64-unknown-linux-{gnu, musl} 278## loongarch64-unknown-linux-{gnu, musl}
254DATA_LAYOUT[loongarch64] = "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128" 279DATA_LAYOUT[loongarch64] = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
255TARGET_ENDIAN[loongarch64] = "little" 280TARGET_ENDIAN[loongarch64] = "little"
256TARGET_POINTER_WIDTH[loongarch64] = "64" 281TARGET_POINTER_WIDTH[loongarch64] = "64"
257TARGET_C_INT_WIDTH[loongarch64] = "32" 282TARGET_C_INT_WIDTH[loongarch64] = "32"
@@ -271,19 +296,11 @@ def arch_to_rust_target_arch(arch):
271 return "arm" 296 return "arm"
272 elif arch == "powerpc64le": 297 elif arch == "powerpc64le":
273 return "powerpc64" 298 return "powerpc64"
274 elif arch == "riscv32gc":
275 return "riscv32"
276 elif arch == "riscv64gc":
277 return "riscv64"
278 else: 299 else:
279 return arch 300 return arch
280 301
281# Convert a rust target string to a llvm-compatible triplet 302# Convert a rust target string to a llvm-compatible triplet
282def rust_sys_to_llvm_target(sys): 303def rust_sys_to_llvm_target(sys):
283 if sys.startswith('riscv32gc-'):
284 return sys.replace('riscv32gc-', 'riscv32-', 1)
285 if sys.startswith('riscv64gc-'):
286 return sys.replace('riscv64gc-', 'riscv64-', 1)
287 return sys 304 return sys
288 305
289# generates our target CPU value 306# generates our target CPU value
@@ -380,9 +397,9 @@ def rust_gen_target(d, thing, wd, arch):
380 else: 397 else:
381 tspec['env'] = "gnu" 398 tspec['env'] = "gnu"
382 if "riscv64" in tspec['llvm-target']: 399 if "riscv64" in tspec['llvm-target']:
383 tspec['llvm-abiname'] = "lp64d" 400 tspec['llvm-abiname'] = d.getVar('TUNE_RISCV_ABI')
384 if "riscv32" in tspec['llvm-target']: 401 if "riscv32" in tspec['llvm-target']:
385 tspec['llvm-abiname'] = "ilp32d" 402 tspec['llvm-abiname'] = d.getVar('TUNE_RISCV_ABI')
386 if "loongarch64" in tspec['llvm-target']: 403 if "loongarch64" in tspec['llvm-target']:
387 tspec['llvm-abiname'] = "lp64d" 404 tspec['llvm-abiname'] = "lp64d"
388 tspec['vendor'] = "unknown" 405 tspec['vendor'] = "unknown"
@@ -391,6 +408,11 @@ def rust_gen_target(d, thing, wd, arch):
391 tspec['cpu'] = cpu 408 tspec['cpu'] = cpu
392 if features != "": 409 if features != "":
393 tspec['features'] = features 410 tspec['features'] = features
411 fpu = d.getVar('TARGET_FPU')
412 if fpu == "soft":
413 tspec['llvm-floatabi'] = "soft"
414 elif fpu == "hard":
415 tspec['llvm-floatabi'] = "hard"
394 tspec['dynamic-linking'] = True 416 tspec['dynamic-linking'] = True
395 tspec['executables'] = True 417 tspec['executables'] = True
396 tspec['linker-is-gnu'] = True 418 tspec['linker-is-gnu'] = True
diff --git a/meta/classes-recipe/rust.bbclass b/meta/classes-recipe/rust.bbclass
index dae25cac2a..e727601679 100644
--- a/meta/classes-recipe/rust.bbclass
+++ b/meta/classes-recipe/rust.bbclass
@@ -43,9 +43,9 @@ HOST_CFLAGS ?= "${CFLAGS}"
43HOST_CXXFLAGS ?= "${CXXFLAGS}" 43HOST_CXXFLAGS ?= "${CXXFLAGS}"
44HOST_CPPFLAGS ?= "${CPPFLAGS}" 44HOST_CPPFLAGS ?= "${CPPFLAGS}"
45 45
46rustlib_suffix="${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${RUST_HOST_SYS}/lib" 46rustlib_suffix = "${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${RUST_HOST_SYS}/lib"
47# Native sysroot standard library path 47# Native sysroot standard library path
48rustlib_src="${prefix}/lib/${rustlib_suffix}" 48rustlib_src = "${prefix}/lib/${rustlib_suffix}"
49# Host sysroot standard library path 49# Host sysroot standard library path
50rustlib="${libdir}/${rustlib_suffix}" 50rustlib = "${libdir}/${rustlib_suffix}"
51rustlib:class-native="${libdir}/rustlib/${BUILD_SYS}/lib" 51rustlib:class-native = "${libdir}/rustlib/${BUILD_SYS}/lib"
diff --git a/meta/classes-recipe/scons.bbclass b/meta/classes-recipe/scons.bbclass
index d20a78dc6e..1cb375522d 100644
--- a/meta/classes-recipe/scons.bbclass
+++ b/meta/classes-recipe/scons.bbclass
@@ -12,7 +12,7 @@ EXTRA_OESCONS ?= ""
12# This value below is derived from $(getconf ARG_MAX) 12# This value below is derived from $(getconf ARG_MAX)
13SCONS_MAXLINELENGTH ?= "MAXLINELENGTH=2097152" 13SCONS_MAXLINELENGTH ?= "MAXLINELENGTH=2097152"
14EXTRA_OESCONS:append = " ${SCONS_MAXLINELENGTH}" 14EXTRA_OESCONS:append = " ${SCONS_MAXLINELENGTH}"
15do_configure() { 15scons_do_configure() {
16 if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then 16 if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
17 if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then 17 if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
18 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} 18 ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
@@ -37,4 +37,4 @@ do_configure[vardepsexclude] = "SCONS_MAXLINELENGTH"
37do_compile[vardepsexclude] = "SCONS_MAXLINELENGTH" 37do_compile[vardepsexclude] = "SCONS_MAXLINELENGTH"
38do_install[vardepsexclude] = "SCONS_MAXLINELENGTH" 38do_install[vardepsexclude] = "SCONS_MAXLINELENGTH"
39 39
40EXPORT_FUNCTIONS do_compile do_install 40EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes-recipe/setuptools3-base.bbclass b/meta/classes-recipe/setuptools3-base.bbclass
index 27af6abc58..190d9e6e3a 100644
--- a/meta/classes-recipe/setuptools3-base.bbclass
+++ b/meta/classes-recipe/setuptools3-base.bbclass
@@ -23,6 +23,10 @@ export CCSHARED = "-fPIC -DPIC"
23# the python executable 23# the python executable
24export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic" 24export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
25 25
26# The environment variable SETUPTOOLS_SCM_SUBPROCESS_TIMEOUT allows
27# to override the subprocess timeout.
28export SETUPTOOLS_SCM_SUBPROCESS_TIMEOUT ??= "600"
29
26FILES:${PN} += "${PYTHON_SITEPACKAGES_DIR}" 30FILES:${PN} += "${PYTHON_SITEPACKAGES_DIR}"
27FILES:${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/*.a" 31FILES:${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/*.a"
28FILES:${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/*.la" 32FILES:${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/*.la"
diff --git a/meta/classes-recipe/setuptools3.bbclass b/meta/classes-recipe/setuptools3.bbclass
index d71a089539..0adce5e2ec 100644
--- a/meta/classes-recipe/setuptools3.bbclass
+++ b/meta/classes-recipe/setuptools3.bbclass
@@ -12,13 +12,26 @@ SETUPTOOLS_BUILD_ARGS ?= ""
12 12
13SETUPTOOLS_SETUP_PATH ?= "${S}" 13SETUPTOOLS_SETUP_PATH ?= "${S}"
14 14
15python do_check_backend() {
16 import re
17 filename = d.expand("${SETUPTOOLS_SETUP_PATH}/pyproject.toml")
18 if os.path.exists(filename):
19 for line in open(filename):
20 match = re.match(r"build-backend\s*=\s*\W([\w.]+)\W", line)
21 if not match: continue
22
23 msg = f"inherits setuptools3 but has pyproject.toml with {match[1]}, use the correct class"
24 if "pep517-backend" not in (d.getVar("INSANE_SKIP") or "").split():
25 oe.qa.handle_error("pep517-backend", msg, d)
26}
27addtask check_backend after do_patch before do_configure
28
15setuptools3_do_configure() { 29setuptools3_do_configure() {
16 : 30 :
17} 31}
18 32
19setuptools3_do_compile() { 33setuptools3_do_compile() {
20 cd ${SETUPTOOLS_SETUP_PATH} 34 cd ${SETUPTOOLS_SETUP_PATH}
21 NO_FETCH_BUILD=1 \
22 STAGING_INCDIR=${STAGING_INCDIR} \ 35 STAGING_INCDIR=${STAGING_INCDIR} \
23 STAGING_LIBDIR=${STAGING_LIBDIR} \ 36 STAGING_LIBDIR=${STAGING_LIBDIR} \
24 ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \ 37 ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \
@@ -35,4 +48,4 @@ setuptools3_do_install() {
35 48
36EXPORT_FUNCTIONS do_configure do_compile do_install 49EXPORT_FUNCTIONS do_configure do_compile do_install
37 50
38export LDSHARED="${CCLD} -shared" 51export LDSHARED = "${CCLD} -shared"
diff --git a/meta/classes-recipe/setuptools3_legacy.bbclass b/meta/classes-recipe/setuptools3_legacy.bbclass
index 264b1f5cfb..166808a695 100644
--- a/meta/classes-recipe/setuptools3_legacy.bbclass
+++ b/meta/classes-recipe/setuptools3_legacy.bbclass
@@ -35,7 +35,6 @@ setuptools3_legacy_do_configure() {
35 35
36setuptools3_legacy_do_compile() { 36setuptools3_legacy_do_compile() {
37 cd ${SETUPTOOLS_SETUP_PATH} 37 cd ${SETUPTOOLS_SETUP_PATH}
38 NO_FETCH_BUILD=1 \
39 STAGING_INCDIR=${STAGING_INCDIR} \ 38 STAGING_INCDIR=${STAGING_INCDIR} \
40 STAGING_LIBDIR=${STAGING_LIBDIR} \ 39 STAGING_LIBDIR=${STAGING_LIBDIR} \
41 ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \ 40 ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \
@@ -79,6 +78,6 @@ setuptools3_legacy_do_install[vardepsexclude] = "MACHINE"
79 78
80EXPORT_FUNCTIONS do_configure do_compile do_install 79EXPORT_FUNCTIONS do_configure do_compile do_install
81 80
82export LDSHARED="${CCLD} -shared" 81export LDSHARED = "${CCLD} -shared"
83DEPENDS += "python3-setuptools-native" 82DEPENDS += "python3-setuptools-native"
84 83
diff --git a/meta/classes-recipe/siteinfo.bbclass b/meta/classes-recipe/siteinfo.bbclass
index 68aefb8eda..25b53d929a 100644
--- a/meta/classes-recipe/siteinfo.bbclass
+++ b/meta/classes-recipe/siteinfo.bbclass
@@ -221,20 +221,6 @@ def siteinfo_get_files(d, sysrootcache=False):
221 # This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example 221 # This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example
222 searched = [] 222 searched = []
223 223
224 if not sysrootcache:
225 return sitefiles, searched
226
227 # Now check for siteconfig cache files in sysroots
228 path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
229 if path_siteconfig and os.path.isdir(path_siteconfig):
230 for i in os.listdir(path_siteconfig):
231 if not i.endswith("_config"):
232 continue
233 filename = os.path.join(path_siteconfig, i)
234 sitefiles.append(filename)
235 return sitefiles, searched 224 return sitefiles, searched
236 225
237# 226
238# Make some information available via variables
239#
240SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
diff --git a/meta/classes-recipe/sourceforge-releases.bbclass b/meta/classes-recipe/sourceforge-releases.bbclass
new file mode 100644
index 0000000000..0b5e5d0711
--- /dev/null
+++ b/meta/classes-recipe/sourceforge-releases.bbclass
@@ -0,0 +1,2 @@
1SOURCEFORGE_PROJECT ?= "${BPN}"
2UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/${SOURCEFORGE_PROJECT}/files/"
diff --git a/meta/classes-recipe/systemd.bbclass b/meta/classes-recipe/systemd.bbclass
index 48b364c1d4..12c59647be 100644
--- a/meta/classes-recipe/systemd.bbclass
+++ b/meta/classes-recipe/systemd.bbclass
@@ -37,17 +37,29 @@ if systemctl >/dev/null 2>/dev/null; then
37 fi 37 fi
38 38
39 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then 39 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
40 for service in ${SYSTEMD_SERVICE_ESCAPED}; do 40 for service in ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}; do
41 systemctl ${OPTS} enable "$service" 41 systemctl ${OPTS} enable "$service"
42 done 42 done
43
44 for service in ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}; do
45 systemctl --global ${OPTS} enable "$service"
46 done
43 fi 47 fi
44 48
45 if [ -z "$D" ]; then 49 if [ -z "$D" ]; then
50 # Reload only system service manager
51 # --global for daemon-reload is not supported: https://github.com/systemd/systemd/issues/19284
46 systemctl daemon-reload 52 systemctl daemon-reload
47 systemctl preset ${SYSTEMD_SERVICE_ESCAPED} 53 [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}" ] && \
54 systemctl preset ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}
55
56 [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}" ] && \
57 systemctl --global preset ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}
48 58
49 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then 59 if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
50 systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED} 60 # --global flag for restart is not supported by systemd (see above)
61 [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}" ] && \
62 systemctl --no-block restart ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}
51 fi 63 fi
52 fi 64 fi
53fi 65fi
@@ -56,9 +68,14 @@ fi
56systemd_prerm() { 68systemd_prerm() {
57if systemctl >/dev/null 2>/dev/null; then 69if systemctl >/dev/null 2>/dev/null; then
58 if [ -z "$D" ]; then 70 if [ -z "$D" ]; then
59 systemctl stop ${SYSTEMD_SERVICE_ESCAPED} 71 if [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}" ]; then
72 systemctl stop ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}
73 systemctl disable ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}
74 fi
60 75
61 systemctl disable ${SYSTEMD_SERVICE_ESCAPED} 76 # same as above, --global flag is not supported for stop so do disable only
77 [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}" ] && \
78 systemctl --global disable ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}
62 fi 79 fi
63fi 80fi
64} 81}
@@ -68,6 +85,49 @@ systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
68systemd_populate_packages[vardepsexclude] += "OVERRIDES" 85systemd_populate_packages[vardepsexclude] += "OVERRIDES"
69 86
70 87
88def systemd_service_path(service, searchpaths, d):
89 path_found = ''
90
91 # Deal with adding, for example, 'ifplugd@eth0.service' from
92 # 'ifplugd@.service'
93 base = None
94 at = service.find('@')
95 if at != -1:
96 ext = service.rfind('.')
97 base = service[:at] + '@' + service[ext:]
98
99 for path in searchpaths:
100 if os.path.lexists(oe.path.join(d.getVar("D"), path, service)):
101 path_found = path
102 break
103 elif base is not None:
104 if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
105 path_found = path
106 break
107
108 return path_found, base
109
110def systemd_service_searchpaths(user, d):
111 if user:
112 return [
113 oe.path.join(d.getVar("sysconfdir"), "systemd", "user"),
114 d.getVar("systemd_user_unitdir"),
115 ]
116 else:
117 return [
118 oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),
119 d.getVar("systemd_system_unitdir"),
120 ]
121
122def systemd_service_exists(service, user, d):
123 searchpaths = systemd_service_searchpaths(user, d)
124 path, _ = systemd_service_path(service, searchpaths, d)
125
126 return path != ''
127
128def systemd_filter_services(services, user, d):
129 return ' '.join(service for service in services.split() if systemd_service_exists(service, user, d))
130
71python systemd_populate_packages() { 131python systemd_populate_packages() {
72 import re 132 import re
73 import shlex 133 import shlex
@@ -85,7 +145,7 @@ python systemd_populate_packages() {
85 def systemd_check_package(pkg_systemd): 145 def systemd_check_package(pkg_systemd):
86 packages = d.getVar('PACKAGES') 146 packages = d.getVar('PACKAGES')
87 if not pkg_systemd in packages.split(): 147 if not pkg_systemd in packages.split():
88 bb.error('%s does not appear in package list, please add it' % pkg_systemd) 148 bb.error('%s is marked for packaging systemd scripts, but it does not appear in package list, please add it to PACKAGES or adjust SYSTEMD_PACKAGES accordingly' % pkg_systemd)
89 149
90 150
91 def systemd_generate_package_scripts(pkg): 151 def systemd_generate_package_scripts(pkg):
@@ -124,73 +184,75 @@ python systemd_populate_packages() {
124 return appended 184 return appended
125 185
126 # Add systemd files to FILES:*-systemd, parse for Also= and follow recursive 186 # Add systemd files to FILES:*-systemd, parse for Also= and follow recursive
127 def systemd_add_files_and_parse(pkg_systemd, path, service, keys): 187 def systemd_add_files_and_parse(pkg_systemd, path, service):
128 # avoid infinite recursion 188 # avoid infinite recursion
129 if systemd_append_file(pkg_systemd, oe.path.join(path, service)): 189 if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
130 fullpath = oe.path.join(d.getVar("D"), path, service) 190 fullpath = oe.path.join(d.getVar("D"), path, service)
131 if service.find('.service') != -1: 191 if service.find('.service') != -1:
132 # for *.service add *@.service 192 # for *.service add *@.service
133 service_base = service.replace('.service', '') 193 service_base = service.replace('.service', '')
134 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys) 194 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service')
195 # Add the socket unit which is referred by the Also= in this service file to the same package.
196 with open(fullpath, 'r') as unit_f:
197 for line in unit_f:
198 if line.startswith('Also'):
199 also_unit = line.split('=', 1)[1].strip()
200 if also_unit.find('.socket') != -1:
201 systemd_add_files_and_parse(pkg_systemd, path, also_unit)
135 if service.find('.socket') != -1: 202 if service.find('.socket') != -1:
136 # for *.socket add *.service and *@.service 203 # for *.socket add *.service and *@.service
137 service_base = service.replace('.socket', '') 204 service_base = service.replace('.socket', '')
138 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys) 205 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service')
139 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys) 206 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service')
140 for key in keys.split():
141 # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
142 cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, shlex.quote(fullpath), key)
143 pipe = os.popen(cmd, 'r')
144 line = pipe.readline()
145 while line:
146 line = line.replace('\n', '')
147 systemd_add_files_and_parse(pkg_systemd, path, line, keys)
148 line = pipe.readline()
149 pipe.close()
150 207
151 # Check service-files and call systemd_add_files_and_parse for each entry 208 # Check service-files and call systemd_add_files_and_parse for each entry
152 def systemd_check_services(): 209 def systemd_check_services():
153 searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),] 210 searchpaths = systemd_service_searchpaths(False, d)
154 searchpaths.append(d.getVar("systemd_system_unitdir")) 211 searchpaths.extend(systemd_service_searchpaths(True, d))
155 searchpaths.append(d.getVar("systemd_user_unitdir")) 212
156 systemd_packages = d.getVar('SYSTEMD_PACKAGES') 213 systemd_packages = d.getVar('SYSTEMD_PACKAGES')
157 214
158 keys = 'Also'
159 # scan for all in SYSTEMD_SERVICE[] 215 # scan for all in SYSTEMD_SERVICE[]
160 for pkg_systemd in systemd_packages.split(): 216 for pkg_systemd in systemd_packages.split():
161 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split(): 217 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
162 path_found = '' 218 path_found, base = systemd_service_path(service, searchpaths, d)
163
164 # Deal with adding, for example, 'ifplugd@eth0.service' from
165 # 'ifplugd@.service'
166 base = None
167 at = service.find('@')
168 if at != -1:
169 ext = service.rfind('.')
170 base = service[:at] + '@' + service[ext:]
171
172 for path in searchpaths:
173 if os.path.lexists(oe.path.join(d.getVar("D"), path, service)):
174 path_found = path
175 break
176 elif base is not None:
177 if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
178 path_found = path
179 break
180 219
181 if path_found != '': 220 if path_found != '':
182 systemd_add_files_and_parse(pkg_systemd, path_found, service, keys) 221 systemd_add_files_and_parse(pkg_systemd, path_found, service)
183 else: 222 else:
184 bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format( 223 bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format(
185 service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else "")) 224 service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
186 225
187 def systemd_create_presets(pkg, action): 226 def systemd_create_presets(pkg, action, user):
188 presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg) 227 import re
228
229 # Check there is at least one service of given type (system/user), don't
230 # create empty files.
231 needs_preset = False
232 for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split():
233 if systemd_service_exists(service, user, d):
234 needs_preset = True
235 break
236
237 if not needs_preset:
238 return
239
240 prefix = "user" if user else "system"
241 presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "%s-preset/98-%s.preset" % (prefix, pkg))
189 bb.utils.mkdirhier(os.path.dirname(presetf)) 242 bb.utils.mkdirhier(os.path.dirname(presetf))
190 with open(presetf, 'a') as fd: 243 with open(presetf, 'a') as fd:
244 template_services = {}
191 for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split(): 245 for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split():
192 fd.write("%s %s\n" % (action,service)) 246 if not systemd_service_exists(service, user, d):
193 d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)) 247 continue
248 if '@' in service and '@.' not in service:
249 (servicename, instance, service_type) = re.split('[@.]', service)
250 template_services.setdefault(servicename + '@.' + service_type, []).append(instance)
251 else:
252 fd.write("%s %s\n" % (action,service))
253 for template, instances in template_services.items():
254 fd.write("%s %s %s\n" % (action, template, ' '.join(instances)))
255 d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "%s-preset/98-%s.preset" % (prefix, pkg)))
194 256
195 # Run all modifications once when creating package 257 # Run all modifications once when creating package
196 if os.path.exists(d.getVar("D")): 258 if os.path.exists(d.getVar("D")):
@@ -200,7 +262,8 @@ python systemd_populate_packages() {
200 systemd_generate_package_scripts(pkg) 262 systemd_generate_package_scripts(pkg)
201 action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg) 263 action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
202 if action in ("enable", "disable"): 264 if action in ("enable", "disable"):
203 systemd_create_presets(pkg, action) 265 systemd_create_presets(pkg, action, False)
266 systemd_create_presets(pkg, action, True)
204 elif action not in ("mask", "preset"): 267 elif action not in ("mask", "preset"):
205 bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action)) 268 bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
206 systemd_check_services() 269 systemd_check_services()
@@ -208,33 +271,28 @@ python systemd_populate_packages() {
208 271
209PACKAGESPLITFUNCS =+ "systemd_populate_packages" 272PACKAGESPLITFUNCS =+ "systemd_populate_packages"
210 273
211python rm_systemd_unitdir (){ 274rm_systemd_unitdir() {
212 import shutil 275 rm -rf ${D}${systemd_unitdir}
213 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d): 276 # Change into ${D} and use a relative path with rmdir -p to avoid
214 systemd_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_unitdir')) 277 # having it remove ${D} if it becomes empty.
215 if os.path.exists(systemd_unitdir): 278 (cd ${D} && rmdir -p $(dirname ${systemd_unitdir#/}) 2>/dev/null || :)
216 shutil.rmtree(systemd_unitdir)
217 systemd_libdir = os.path.dirname(systemd_unitdir)
218 if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
219 os.rmdir(systemd_libdir)
220} 279}
221 280
222python rm_sysvinit_initddir (){ 281rm_sysvinit_initddir() {
223 import shutil 282 local sysv_initddir=${INIT_D_DIR}
224 sysv_initddir = oe.path.join(d.getVar("D"), (d.getVar('INIT_D_DIR') or "/etc/init.d")) 283 : ${sysv_initddir:=${sysconfdir}/init.d}
225 284
226 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \ 285 # If systemd_system_unitdir contains anything, delete sysv_initddir
227 not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \ 286 if [ "$(ls -A ${D}${systemd_system_unitdir} 2>/dev/null)" ]; then
228 os.path.exists(sysv_initddir): 287 rm -rf ${D}$sysv_initddir
229 systemd_system_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_system_unitdir')) 288 rmdir -p $(dirname ${D}$sysv_initddir) 2>/dev/null || :
230 289 fi
231 # If systemd_system_unitdir contains anything, delete sysv_initddir
232 if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
233 shutil.rmtree(sysv_initddir)
234} 290}
235 291
236do_install[postfuncs] += "${RMINITDIR} " 292do_install[postfuncs] += "${RMINITDIR}"
237RMINITDIR:class-target = " rm_sysvinit_initddir rm_systemd_unitdir " 293RMINITDIR = " \
238RMINITDIR:class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir " 294 ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '', 'rm_systemd_unitdir', d)} \
239RMINITDIR = "" 295 ${@'rm_sysvinit_initddir' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
240 296 not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) else ''} \
297"
298RMINITDIR:class-native = ""
diff --git a/meta/classes-recipe/testexport.bbclass b/meta/classes-recipe/testexport.bbclass
index 572f5d9e76..cc4088c71a 100644
--- a/meta/classes-recipe/testexport.bbclass
+++ b/meta/classes-recipe/testexport.bbclass
@@ -29,7 +29,6 @@ require conf/testexport.conf
29TEST_EXPORT_SDK_ENABLED ?= "0" 29TEST_EXPORT_SDK_ENABLED ?= "0"
30 30
31TEST_EXPORT_DEPENDS = "" 31TEST_EXPORT_DEPENDS = ""
32TEST_EXPORT_DEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
33TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}" 32TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}"
34TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock" 33TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock"
35 34
@@ -50,16 +49,23 @@ def testexport_main(d):
50 from oeqa.runtime.context import OERuntimeTestContextExecutor 49 from oeqa.runtime.context import OERuntimeTestContextExecutor
51 50
52 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'), 51 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
53 d.getVar('IMAGE_LINK_NAME'))) 52 d.getVar('IMAGE_LINK_NAME') or d.getVar('IMAGE_NAME')))
54 53
55 tdname = "%s.testdata.json" % image_name 54 tdname = "%s.testdata.json" % image_name
56 td = json.load(open(tdname, "r")) 55 td = json.load(open(tdname, "r"))
57 56
58 logger = logging.getLogger("BitBake") 57 logger = logging.getLogger("BitBake")
59 58
59 target_kwargs = { }
60 target_kwargs['machine'] = d.getVar("MACHINE") or None
61 target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
62 target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
63 target_kwargs['serialcontrol_ps1'] = d.getVar("TEST_SERIALCONTROL_PS1") or None
64 target_kwargs['serialcontrol_connect_timeout'] = d.getVar("TEST_SERIALCONTROL_CONNECT_TIMEOUT") or None
65
60 target = OERuntimeTestContextExecutor.getTarget( 66 target = OERuntimeTestContextExecutor.getTarget(
61 d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"), 67 d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
62 d.getVar("TEST_SERVER_IP")) 68 d.getVar("TEST_SERVER_IP"), **target_kwargs)
63 69
64 image_manifest = "%s.manifest" % image_name 70 image_manifest = "%s.manifest" % image_name
65 image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest) 71 image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
diff --git a/meta/classes-recipe/testimage.bbclass b/meta/classes-recipe/testimage.bbclass
index ed0d87b7a7..847a6f18a8 100644
--- a/meta/classes-recipe/testimage.bbclass
+++ b/meta/classes-recipe/testimage.bbclass
@@ -24,11 +24,14 @@ TESTIMAGE_AUTO ??= "0"
24 24
25TESTIMAGE_FAILED_QA_ARTIFACTS = "\ 25TESTIMAGE_FAILED_QA_ARTIFACTS = "\
26 ${localstatedir}/log \ 26 ${localstatedir}/log \
27 ${localstatedir}/volatile/log \
27 ${sysconfdir}/version \ 28 ${sysconfdir}/version \
28 ${sysconfdir}/os-release" 29 ${sysconfdir}/os-release \
30 ${nonarch_libdir}/os-release \
31"
29 32
30# If some ptests are run and fail, retrieve corresponding directories 33# If some ptests are run and fail, retrieve corresponding directories
31TESTIMAGE_FAILED_QA_ARTIFACTS += "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '${libdir}/${MCNAME}/ptest', '', d)}" 34TESTIMAGE_FAILED_QA_ARTIFACTS += "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '${libdir}/*/ptest', '', d)}"
32 35
33# You can set (or append to) TEST_SUITES in local.conf to select the tests 36# You can set (or append to) TEST_SUITES in local.conf to select the tests
34# which you want to run for your target. 37# which you want to run for your target.
@@ -99,7 +102,6 @@ TESTIMAGE_BOOT_PATTERNS ?= ""
99 102
100TESTIMAGEDEPENDS = "" 103TESTIMAGEDEPENDS = ""
101TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot" 104TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
102TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
103TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}" 105TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
104TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}" 106TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
105TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}" 107TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}"
@@ -110,7 +112,7 @@ TESTIMAGELOCK:qemuall = ""
110 112
111TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/" 113TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
112 114
113TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR_IMAGE IMAGE_LINK_NAME" 115TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR_IMAGE IMAGE_LINK_NAME IMAGE_NAME"
114 116
115testimage_dump_monitor () { 117testimage_dump_monitor () {
116 query-status 118 query-status
@@ -208,7 +210,7 @@ def testimage_main(d):
208 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR")) 210 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR"))
209 211
210 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'), 212 image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
211 d.getVar('IMAGE_LINK_NAME'))) 213 d.getVar('IMAGE_LINK_NAME') or d.getVar('IMAGE_NAME')))
212 214
213 tdname = "%s.testdata.json" % image_name 215 tdname = "%s.testdata.json" % image_name
214 try: 216 try:
@@ -239,6 +241,8 @@ def testimage_main(d):
239 bb.fatal('Unsupported image type built. Add a compatible image to ' 241 bb.fatal('Unsupported image type built. Add a compatible image to '
240 'IMAGE_FSTYPES. Supported types: %s' % 242 'IMAGE_FSTYPES. Supported types: %s' %
241 ', '.join(supported_fstypes)) 243 ', '.join(supported_fstypes))
244 elif d.getVar("TEST_TARGET") == "serial":
245 bb.fatal('Serial target is currently only supported in testexport.')
242 qfstype = fstypes[0] 246 qfstype = fstypes[0]
243 qdeffstype = d.getVar("QB_DEFAULT_FSTYPE") 247 qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
244 if qdeffstype: 248 if qdeffstype:
@@ -376,7 +380,6 @@ def testimage_main(d):
376 bb.error('runqemu failed, shutting down...') 380 bb.error('runqemu failed, shutting down...')
377 if results: 381 if results:
378 results.stop() 382 results.stop()
379 results = tc.results
380 finally: 383 finally:
381 signal.signal(signal.SIGTERM, orig_sigterm_handler) 384 signal.signal(signal.SIGTERM, orig_sigterm_handler)
382 tc.target.stop() 385 tc.target.stop()
@@ -397,9 +400,9 @@ def testimage_main(d):
397 os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME')))) 400 os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
398 401
399 if not results or not complete: 402 if not results or not complete:
400 bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True) 403 bb.error('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
401 if not results.wasSuccessful(): 404 if results and not results.wasSuccessful():
402 bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True) 405 bb.error('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
403 406
404def get_runtime_paths(d): 407def get_runtime_paths(d):
405 """ 408 """
@@ -483,5 +486,3 @@ python () {
483 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"): 486 if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
484 bb.build.addtask("testimage", "do_build", "do_image_complete", d) 487 bb.build.addtask("testimage", "do_build", "do_image_complete", d)
485} 488}
486
487inherit testsdk
diff --git a/meta/classes-recipe/testsdk.bbclass b/meta/classes-recipe/testsdk.bbclass
index fd82e6ef41..b1c4fa67e6 100644
--- a/meta/classes-recipe/testsdk.bbclass
+++ b/meta/classes-recipe/testsdk.bbclass
@@ -14,8 +14,12 @@
14# 14#
15# where "<image-name>" is an image like core-image-sato. 15# where "<image-name>" is an image like core-image-sato.
16 16
17# List of test modules to run, or run all that can be found if unset
18TESTSDK_SUITES ?= ""
19
17TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK" 20TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK"
18TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt" 21TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt"
22TESTSDK_CASE_DIRS ?= "sdk"
19 23
20def import_and_run(name, d): 24def import_and_run(name, d):
21 import importlib 25 import importlib
diff --git a/meta/classes-recipe/toolchain-scripts.bbclass b/meta/classes-recipe/toolchain-scripts.bbclass
index 6bfe0b6de0..3053cd0f1f 100644
--- a/meta/classes-recipe/toolchain-scripts.bbclass
+++ b/meta/classes-recipe/toolchain-scripts.bbclass
@@ -4,7 +4,7 @@
4# SPDX-License-Identifier: MIT 4# SPDX-License-Identifier: MIT
5# 5#
6 6
7inherit toolchain-scripts-base siteinfo kernel-arch 7inherit toolchain-scripts-base siteinfo kernel-arch meson-routines
8 8
9# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it 9# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
10# doesn't always match our expectations... but we default to the stock value 10# doesn't always match our expectations... but we default to the stock value
@@ -16,6 +16,13 @@ DEBUG_PREFIX_MAP = ""
16 16
17EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}" 17EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
18 18
19def siteinfo_with_prefix(d, prefix):
20 # Return a prefixed value from siteinfo
21 for item in siteinfo_data_for_machine(d.getVar("TARGET_ARCH"), d.getVar("TARGET_OS"), d):
22 if item.startswith(prefix):
23 return item.replace(prefix, "")
24 raise KeyError
25
19# This function creates an environment-setup-script for use in a deployable SDK 26# This function creates an environment-setup-script for use in a deployable SDK
20toolchain_create_sdk_env_script () { 27toolchain_create_sdk_env_script () {
21 # Create environment setup script. Remember that $SDKTARGETSYSROOT should 28 # Create environment setup script. Remember that $SDKTARGETSYSROOT should
@@ -63,6 +70,12 @@ toolchain_create_sdk_env_script () {
63 echo 'export OECORE_BASELIB="${baselib}"' >> $script 70 echo 'export OECORE_BASELIB="${baselib}"' >> $script
64 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script 71 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
65 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script 72 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
73 echo 'export OECORE_TARGET_BITS="${@siteinfo_with_prefix(d, 'bit-')}"' >>$script
74 echo 'export OECORE_TARGET_ENDIAN="${@siteinfo_with_prefix(d, 'endian-')}"' >>$script
75 echo 'export OECORE_MESON_HOST_SYSTEM="${@meson_operating_system('TARGET_OS', d)}"' >>$script
76 echo 'export OECORE_MESON_HOST_CPU_FAMILY="${@meson_cpu_family('TARGET_ARCH', d)}"' >>$script
77 echo 'export OECORE_MESON_HOST_CPU="${TARGET_ARCH}"' >>$script
78 echo 'export OECORE_MESON_HOST_ENDIAN="${@meson_endian('TARGET', d)}"' >>$script
66 79
67 echo 'unset command_not_found_handle' >> $script 80 echo 'unset command_not_found_handle' >> $script
68 81
@@ -90,6 +103,12 @@ toolchain_create_tree_env_script () {
90 echo 'export OECORE_BASELIB="${baselib}"' >> $script 103 echo 'export OECORE_BASELIB="${baselib}"' >> $script
91 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script 104 echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
92 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script 105 echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
106 echo 'export OECORE_TARGET_BITS="${@siteinfo_with_prefix(d, 'bit-')}"' >>$script
107 echo 'export OECORE_TARGET_ENDIAN="${@siteinfo_with_prefix(d, 'endian-')}"' >>$script
108 echo 'export OECORE_MESON_HOST_SYSTEM="${@meson_operating_system('TARGET_OS', d)}"' >>$script
109 echo 'export OECORE_MESON_HOST_CPU_FAMILY="${@meson_cpu_family('TARGET_ARCH', d)}"' >>$script
110 echo 'export OECORE_MESON_HOST_CPU="${TARGET_ARCH}"' >>$script
111 echo 'export OECORE_MESON_HOST_ENDIAN="${@meson_endian('TARGET', d)}"' >>$script
93 112
94 toolchain_shared_env_script 113 toolchain_shared_env_script
95 114
@@ -192,7 +211,6 @@ EOF
192 211
193#we get the cached site config in the runtime 212#we get the cached site config in the runtime
194TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}" 213TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}"
195TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
196TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses" 214TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
197DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}" 215DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
198 216
@@ -214,14 +232,8 @@ toolchain_create_sdk_siteconfig () {
214 sitefile=`echo $sitefile | tr / _` 232 sitefile=`echo $sitefile | tr / _`
215 sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile` 233 sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile`
216 esac 234 esac
217
218 if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
219 cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
220 fi
221 done 235 done
222} 236}
223# The immediate expansion above can result in unwanted path dependencies here
224toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
225 237
226python __anonymous () { 238python __anonymous () {
227 import oe.classextend 239 import oe.classextend
diff --git a/meta/classes-recipe/uboot-config.bbclass b/meta/classes-recipe/uboot-config.bbclass
index e55fc38b7c..bc20913f73 100644
--- a/meta/classes-recipe/uboot-config.bbclass
+++ b/meta/classes-recipe/uboot-config.bbclass
@@ -19,15 +19,20 @@ def removesuffix(s, suffix):
19 return s[:-len(suffix)] 19 return s[:-len(suffix)]
20 return s 20 return s
21 21
22UBOOT_ENTRYPOINT ?= "20008000" 22UBOOT_ENTRYPOINT ?= "0x20008000"
23UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}" 23UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
24 24
25# When naming the files we install/deploy, the package version and revision
26# are part of the filename. Create a single variable to represent this and
27# allow it to be customized if desired.
28UBOOT_VERSION ?= "${PV}-${PR}"
29
25# Some versions of u-boot use .bin and others use .img. By default use .bin 30# Some versions of u-boot use .bin and others use .img. By default use .bin
26# but enable individual recipes to change this value. 31# but enable individual recipes to change this value.
27UBOOT_SUFFIX ??= "bin" 32UBOOT_SUFFIX ??= "bin"
28UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}" 33UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
29UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}" 34UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}"
30UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}" 35UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${UBOOT_VERSION}.${UBOOT_SUFFIX}"
31UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}" 36UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}"
32UBOOT_MAKE_TARGET ?= "all" 37UBOOT_MAKE_TARGET ?= "all"
33 38
@@ -36,7 +41,7 @@ UBOOT_MAKE_TARGET ?= "all"
36# purposes. 41# purposes.
37UBOOT_ELF ?= "" 42UBOOT_ELF ?= ""
38UBOOT_ELF_SUFFIX ?= "elf" 43UBOOT_ELF_SUFFIX ?= "elf"
39UBOOT_ELF_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.${UBOOT_ELF_SUFFIX}" 44UBOOT_ELF_IMAGE ?= "u-boot-${MACHINE}-${UBOOT_VERSION}.${UBOOT_ELF_SUFFIX}"
40UBOOT_ELF_BINARY ?= "u-boot.${UBOOT_ELF_SUFFIX}" 45UBOOT_ELF_BINARY ?= "u-boot.${UBOOT_ELF_SUFFIX}"
41UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}" 46UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}"
42 47
@@ -49,7 +54,7 @@ SPL_BINARY ?= ""
49SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}" 54SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}"
50SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}" 55SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}"
51SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}" 56SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}"
52SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}${SPL_DELIMITER}${SPL_SUFFIX}" 57SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${UBOOT_VERSION}${SPL_DELIMITER}${SPL_SUFFIX}"
53SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}" 58SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}"
54 59
55# Additional environment variables or a script can be installed alongside 60# Additional environment variables or a script can be installed alongside
@@ -62,14 +67,14 @@ UBOOT_ENV ?= ""
62UBOOT_ENV_SRC_SUFFIX ?= "cmd" 67UBOOT_ENV_SRC_SUFFIX ?= "cmd"
63UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}" 68UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}"
64UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}" 69UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
65UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}" 70UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${UBOOT_VERSION}.${UBOOT_ENV_SUFFIX}"
66UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}" 71UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
67 72
68# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf 73# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
69# to find EXTLINUX conf file. 74# to find EXTLINUX conf file.
70UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux" 75UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
71UBOOT_EXTLINUX_CONF_NAME ?= "extlinux.conf" 76UBOOT_EXTLINUX_CONF_NAME ?= "extlinux.conf"
72UBOOT_EXTLINUX_SYMLINK ?= "${UBOOT_EXTLINUX_CONF_NAME}-${MACHINE}-${PR}" 77UBOOT_EXTLINUX_SYMLINK ?= "${UBOOT_EXTLINUX_CONF_NAME}-${MACHINE}-${UBOOT_VERSION}"
73 78
74# Options for the device tree compiler passed to mkimage '-D' feature: 79# Options for the device tree compiler passed to mkimage '-D' feature:
75UBOOT_MKIMAGE_DTCOPTS ??= "" 80UBOOT_MKIMAGE_DTCOPTS ??= ""
@@ -101,12 +106,12 @@ python () {
101 # The "doc" varflag is special, we don't want to see it here 106 # The "doc" varflag is special, we don't want to see it here
102 ubootconfigflags.pop('doc', None) 107 ubootconfigflags.pop('doc', None)
103 ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split() 108 ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
109 recipename = d.getVar("PN")
104 110
105 if not ubootmachine and not ubootconfig: 111 if not ubootmachine and not ubootconfig:
106 PN = d.getVar("PN")
107 FILE = os.path.basename(d.getVar("FILE")) 112 FILE = os.path.basename(d.getVar("FILE"))
108 bb.debug(1, "To build %s, see %s for instructions on \ 113 bb.debug(1, "To build %s, see %s for instructions on \
109 setting up your machine config" % (PN, FILE)) 114 setting up your machine config" % (recipename, FILE))
110 raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE")) 115 raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
111 116
112 if ubootmachine and ubootconfig: 117 if ubootmachine and ubootconfig:
@@ -140,9 +145,12 @@ python () {
140 if not found: 145 if not found:
141 raise bb.parse.SkipRecipe("The selected UBOOT_CONFIG key %s has no match in %s." % (ubootconfig, ubootconfigflags.keys())) 146 raise bb.parse.SkipRecipe("The selected UBOOT_CONFIG key %s has no match in %s." % (ubootconfig, ubootconfigflags.keys()))
142 147
143 if len(ubootconfig) == 1: 148 # This recipe might be inherited e.g. by the kernel recipe via kernel-fitimage.bbclass
144 d.setVar('KCONFIG_CONFIG_ROOTDIR', os.path.join(d.getVar("B"), d.getVar("UBOOT_MACHINE").strip())) 149 # Ensure the uboot specific menuconfig settings do not leak into other recipes
145 else: 150 if 'u-boot' in recipename:
146 # Disable menuconfig for multiple configs 151 if len(ubootconfig) == 1:
147 d.setVar('KCONFIG_CONFIG_ENABLE_MENUCONFIG', "false") 152 d.setVar('KCONFIG_CONFIG_ROOTDIR', os.path.join("${B}", d.getVar("UBOOT_MACHINE").strip()))
153 else:
154 # Disable menuconfig for multiple configs
155 d.setVar('KCONFIG_CONFIG_ENABLE_MENUCONFIG', "false")
148} 156}
diff --git a/meta/classes-recipe/uboot-extlinux-config.bbclass b/meta/classes-recipe/uboot-extlinux-config.bbclass
index 0413e760bd..099476f5d6 100644
--- a/meta/classes-recipe/uboot-extlinux-config.bbclass
+++ b/meta/classes-recipe/uboot-extlinux-config.bbclass
@@ -15,6 +15,7 @@
15# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name. 15# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
16# UBOOT_EXTLINUX_FDTDIR - Device tree directory. 16# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
17# UBOOT_EXTLINUX_FDT - Device tree file. 17# UBOOT_EXTLINUX_FDT - Device tree file.
18# UBOOT_EXTLINUX_FDTOVERLAYS - Device tree overlay files. Space-separated list.
18# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to 19# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
19# concatenate and use as an initrd (optional). 20# concatenate and use as an initrd (optional).
20# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description. 21# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
@@ -66,6 +67,7 @@
66UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}" 67UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}"
67UBOOT_EXTLINUX_LABELS ??= "linux" 68UBOOT_EXTLINUX_LABELS ??= "linux"
68UBOOT_EXTLINUX_FDT ??= "" 69UBOOT_EXTLINUX_FDT ??= ""
70UBOOT_EXTLINUX_FDTOVERLAYS ??= ""
69UBOOT_EXTLINUX_FDTDIR ??= "../" 71UBOOT_EXTLINUX_FDTDIR ??= "../"
70UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}" 72UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
71UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw" 73UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
@@ -136,15 +138,17 @@ python do_create_extlinux_config() {
136 fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR') 138 fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR')
137 139
138 fdt = localdata.getVar('UBOOT_EXTLINUX_FDT') 140 fdt = localdata.getVar('UBOOT_EXTLINUX_FDT')
141 fdtoverlays = localdata.getVar('UBOOT_EXTLINUX_FDTOVERLAYS')
142
143 cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
139 144
140 if fdt: 145 if fdt:
141 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n' % 146 cfgfile.write('\tFDT %s\n' % (fdt))
142 (menu_description, kernel_image, fdt))
143 elif fdtdir: 147 elif fdtdir:
144 cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' % 148 cfgfile.write('\tFDTDIR %s\n' % (fdtdir))
145 (menu_description, kernel_image, fdtdir)) 149
146 else: 150 if fdtoverlays:
147 cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image)) 151 cfgfile.write('\tFDTOVERLAYS %s\n' % (' '.join(fdtoverlays.split())))
148 152
149 kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS') 153 kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS')
150 154
@@ -158,7 +162,7 @@ python do_create_extlinux_config() {
158 except OSError: 162 except OSError:
159 bb.fatal('Unable to open %s' % (cfile)) 163 bb.fatal('Unable to open %s' % (cfile))
160} 164}
161UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD" 165UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT FDTOVERLAYS KERNEL_ARGS INITRD"
162do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s:%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}" 166do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s:%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
163do_create_extlinux_config[vardepsexclude] += "OVERRIDES" 167do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
164 168
diff --git a/meta/classes-recipe/uboot-sign.bbclass b/meta/classes-recipe/uboot-sign.bbclass
index c8e097f2f2..0f387a3a3e 100644
--- a/meta/classes-recipe/uboot-sign.bbclass
+++ b/meta/classes-recipe/uboot-sign.bbclass
@@ -26,6 +26,7 @@
26 26
27# We need some variables from u-boot-config 27# We need some variables from u-boot-config
28inherit uboot-config 28inherit uboot-config
29require conf/image-fitimage.conf
29 30
30# Enable use of a U-Boot fitImage 31# Enable use of a U-Boot fitImage
31UBOOT_FITIMAGE_ENABLE ?= "0" 32UBOOT_FITIMAGE_ENABLE ?= "0"
@@ -49,6 +50,8 @@ UBOOT_FITIMAGE_BINARY ?= "u-boot-fitImage"
49UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}" 50UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}"
50SPL_DIR ?= "spl" 51SPL_DIR ?= "spl"
51SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb" 52SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb"
53# When SPL is not used, set SPL_DTB_BINARY ?= "" to explicitly indicate
54# that no SPL DTB should be created or signed.
52SPL_DTB_BINARY ?= "u-boot-spl.dtb" 55SPL_DTB_BINARY ?= "u-boot-spl.dtb"
53SPL_DTB_SIGNED ?= "${SPL_DTB_BINARY}-signed" 56SPL_DTB_SIGNED ?= "${SPL_DTB_BINARY}-signed"
54SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb" 57SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb"
@@ -85,19 +88,40 @@ UBOOT_FIT_KEY_SIGN_PKCS ?= "-x509"
85# ex: 1 32bits address, 2 64bits address 88# ex: 1 32bits address, 2 64bits address
86UBOOT_FIT_ADDRESS_CELLS ?= "1" 89UBOOT_FIT_ADDRESS_CELLS ?= "1"
87 90
88# This is only necessary for determining the signing configuration 91# ARM Trusted Firmware (ATF) is a reference implementation of secure world
89KERNEL_PN = "${PREFERRED_PROVIDER_virtual/kernel}" 92# software for Arm A-Profile architectures, (Armv8-A and Armv7-A), including
93# an Exception Level 3 (EL3) Secure Monitor.
94UBOOT_FIT_ARM_TRUSTED_FIRMWARE ?= "0"
95UBOOT_FIT_ARM_TRUSTED_FIRMWARE_IMAGE ?= "bl31.bin"
96
97# A Trusted Execution Environment (TEE) is an environment for executing code,
98# in which those executing the code can have high levels of trust in the asset
99# management of that surrounding environment.
100UBOOT_FIT_TEE ?= "0"
101UBOOT_FIT_TEE_IMAGE ?= "tee-raw.bin"
102
103# User specific settings
104UBOOT_FIT_USER_SETTINGS ?= ""
105
106# Sets the firmware property to select the image to boot first.
107# If not set, the first entry in "loadables" is used instead.
108UBOOT_FIT_CONF_FIRMWARE ?= ""
109
110# Unit names of the user's additional binaries to be loaded.
111# It is a comma-separated list of strings.
112UBOOT_FIT_CONF_USER_LOADABLES ?= ''
90 113
91UBOOT_FIT_UBOOT_LOADADDRESS ?= "${UBOOT_LOADADDRESS}" 114UBOOT_FIT_UBOOT_LOADADDRESS ?= "${UBOOT_LOADADDRESS}"
92UBOOT_FIT_UBOOT_ENTRYPOINT ?= "${UBOOT_ENTRYPOINT}" 115UBOOT_FIT_UBOOT_ENTRYPOINT ?= "${UBOOT_ENTRYPOINT}"
93 116
117
118DEPENDS:append = " ${@'kernel-signing-keys-native' if d.getVar('FIT_GENERATE_KEYS') == '1' else ''}"
119
94python() { 120python() {
95 # We need u-boot-tools-native if we're creating a U-Boot fitImage 121 # We need u-boot-tools-native if we're creating a U-Boot fitImage
96 sign = d.getVar('UBOOT_SIGN_ENABLE') == '1' 122 sign = d.getVar('UBOOT_SIGN_ENABLE') == '1'
97 if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' or sign: 123 if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' or sign:
98 d.appendVar('DEPENDS', " u-boot-tools-native dtc-native") 124 d.appendVar('DEPENDS', " u-boot-tools-native dtc-native")
99 if sign:
100 d.appendVar('DEPENDS', " " + d.getVar('KERNEL_PN'))
101} 125}
102 126
103concat_dtb() { 127concat_dtb() {
@@ -105,38 +129,92 @@ concat_dtb() {
105 binary="$2" 129 binary="$2"
106 130
107 if [ -e "${UBOOT_DTB_BINARY}" ]; then 131 if [ -e "${UBOOT_DTB_BINARY}" ]; then
108 # Re-sign the kernel in order to add the keys to our dtb 132 # Signing individual images is not recommended as that
133 # makes fitImage susceptible to mix-and-match attack.
134 #
135 # OE FIT_SIGN_INDIVIDUAL is implemented in an unusual manner,
136 # where the resulting signed fitImage contains both signed
137 # images and signed configurations. This is redundant. In
138 # order to prevent mix-and-match attack, it is sufficient
139 # to sign configurations. The FIT_SIGN_INDIVIDUAL = "1"
140 # support is kept to avoid breakage of existing layers, but
141 # it is highly recommended to avoid FIT_SIGN_INDIVIDUAL = "1",
142 # i.e. set FIT_SIGN_INDIVIDUAL = "0" .
143 if [ "${FIT_SIGN_INDIVIDUAL}" = "1" ] ; then
144			# Sign dummy images in order to
145 # add the image signing keys to our dtb
146 ${UBOOT_MKIMAGE_SIGN} \
147 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
148 -f auto \
149 -k "${UBOOT_SIGN_KEYDIR}" \
150 -o "${FIT_HASH_ALG},${FIT_SIGN_ALG}" \
151 -g "${UBOOT_SIGN_IMG_KEYNAME}" \
152 -K "${UBOOT_DTB_BINARY}" \
153 -d /dev/null \
154 -r ${B}/unused.itb \
155 ${UBOOT_MKIMAGE_SIGN_ARGS}
156 fi
157
158 # Sign dummy image configurations in order to
159 # add the configuration signing keys to our dtb
109 ${UBOOT_MKIMAGE_SIGN} \ 160 ${UBOOT_MKIMAGE_SIGN} \
110 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \ 161 ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
111 -F -k "${UBOOT_SIGN_KEYDIR}" \ 162 -f auto-conf \
163 -k "${UBOOT_SIGN_KEYDIR}" \
164 -o "${FIT_HASH_ALG},${FIT_SIGN_ALG}" \
165 -g "${UBOOT_SIGN_KEYNAME}" \
112 -K "${UBOOT_DTB_BINARY}" \ 166 -K "${UBOOT_DTB_BINARY}" \
113 -r ${B}/fitImage-linux \ 167 -d /dev/null \
168 -r ${B}/unused.itb \
114 ${UBOOT_MKIMAGE_SIGN_ARGS} 169 ${UBOOT_MKIMAGE_SIGN_ARGS}
115 # Verify the kernel image and u-boot dtb 170
116 ${UBOOT_FIT_CHECK_SIGN} \ 171 # Verify the dummy fitImage signature against u-boot.dtb
117 -k "${UBOOT_DTB_BINARY}" \ 172 # augmented using public key material.
118 -f ${B}/fitImage-linux 173 #
174 # This only works for FIT_SIGN_INDIVIDUAL = "0", because
175 # mkimage -f auto-conf does not support -F to extend the
176 # existing unused.itb , and instead rewrites unused.itb
177 # from scratch.
178 #
179 # Using two separate unused.itb for mkimage -f auto and
180 # mkimage -f auto-conf invocation above would not help, as
181 # the signature verification process below checks whether
182 # all keys inserted into u-boot.dtb /signature node pass
183 # the verification. Separate unused.itb would each miss one
184 # of the signatures.
185 #
186 # The FIT_SIGN_INDIVIDUAL = "1" support is kept to avoid
187 # breakage of existing layers, but it is highly recommended
188 # to not use FIT_SIGN_INDIVIDUAL = "1", i.e. set
189 # FIT_SIGN_INDIVIDUAL = "0" .
190 if [ "${FIT_SIGN_INDIVIDUAL}" != "1" ] ; then
191 ${UBOOT_FIT_CHECK_SIGN} \
192 -k "${UBOOT_DTB_BINARY}" \
193 -f ${B}/unused.itb
194 fi
119 cp ${UBOOT_DTB_BINARY} ${UBOOT_DTB_SIGNED} 195 cp ${UBOOT_DTB_BINARY} ${UBOOT_DTB_SIGNED}
120 fi 196 fi
121 197
122 # If we're not using a signed u-boot fit, concatenate SPL w/o DTB & U-Boot DTB 198 # If we're not using a signed u-boot fit, concatenate SPL w/o DTB & U-Boot DTB
123 # with public key (otherwise U-Boot will be packaged by uboot_fitimage_assemble) 199 # with public key (otherwise U-Boot will be packaged by uboot_fitimage_assemble)
124 if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then 200 if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then
125 if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \ 201 if [ ! -e "${UBOOT_DTB_BINARY}" ]; then
126 [ -e "${UBOOT_DTB_BINARY}" ]; then 202 bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
203 return
204 fi
205
206 if [ "x${UBOOT_SUFFIX}" = "ximg" ] || [ "x${UBOOT_SUFFIX}" = "xrom" ]; then
127 oe_runmake EXT_DTB="${UBOOT_DTB_SIGNED}" ${UBOOT_MAKE_TARGET} 207 oe_runmake EXT_DTB="${UBOOT_DTB_SIGNED}" ${UBOOT_MAKE_TARGET}
128 if [ -n "${binary}" ]; then 208 if [ -n "${binary}" ]; then
129 cp ${binary} ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX} 209 cp ${binary} ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX}
130 fi 210 fi
131 elif [ -e "${UBOOT_NODTB_BINARY}" -a -e "${UBOOT_DTB_BINARY}" ]; then 211 elif [ -e "${UBOOT_NODTB_BINARY}" ]; then
132 if [ -n "${binary}" ]; then 212 if [ -n "${binary}" ]; then
133 cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} | tee ${binary} > \ 213 cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} | tee ${binary} > \
134 ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX} 214 ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX}
135 else 215 else
136 cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} > ${UBOOT_BINARY} 216 cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} > ${UBOOT_BINARY}
137 fi 217 fi
138 else
139 bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
140 fi 218 fi
141 fi 219 fi
142} 220}
@@ -168,7 +246,7 @@ deploy_dtb() {
168} 246}
169 247
170concat_spl_dtb() { 248concat_spl_dtb() {
171 if [ -e "${SPL_DIR}/${SPL_NODTB_BINARY}" -a -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then 249 if [ -e "${SPL_DIR}/${SPL_NODTB_BINARY}" ] && [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then
172 cat ${SPL_DIR}/${SPL_NODTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED} > "${SPL_BINARY}" 250 cat ${SPL_DIR}/${SPL_NODTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED} > "${SPL_BINARY}"
173 else 251 else
174 bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available." 252 bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available."
@@ -234,9 +312,65 @@ do_uboot_generate_rsa_keys() {
234 312
235addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compile 313addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compile
236 314
315# Create an ITS file for the ATF
316uboot_fitimage_atf() {
317 cat << EOF >> ${UBOOT_ITS}
318 atf {
319 description = "ARM Trusted Firmware";
320 data = /incbin/("${UBOOT_FIT_ARM_TRUSTED_FIRMWARE_IMAGE}");
321 type = "firmware";
322 arch = "${UBOOT_ARCH}";
323 os = "arm-trusted-firmware";
324 load = <${UBOOT_FIT_ARM_TRUSTED_FIRMWARE_LOADADDRESS}>;
325 entry = <${UBOOT_FIT_ARM_TRUSTED_FIRMWARE_ENTRYPOINT}>;
326 compression = "none";
327EOF
328 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
329 cat << EOF >> ${UBOOT_ITS}
330 signature {
331 algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}";
332 key-name-hint = "${SPL_SIGN_KEYNAME}";
333 };
334EOF
335 fi
336
337 cat << EOF >> ${UBOOT_ITS}
338 };
339EOF
340}
341
342# Create an ITS file for the TEE
343uboot_fitimage_tee() {
344 cat << EOF >> ${UBOOT_ITS}
345 tee {
346 description = "Trusted Execution Environment";
347 data = /incbin/("${UBOOT_FIT_TEE_IMAGE}");
348 type = "tee";
349 arch = "${UBOOT_ARCH}";
350 os = "tee";
351 load = <${UBOOT_FIT_TEE_LOADADDRESS}>;
352 entry = <${UBOOT_FIT_TEE_ENTRYPOINT}>;
353 compression = "none";
354EOF
355 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
356 cat << EOF >> ${UBOOT_ITS}
357 signature {
358 algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}";
359 key-name-hint = "${SPL_SIGN_KEYNAME}";
360 };
361EOF
362 fi
363
364 cat << EOF >> ${UBOOT_ITS}
365 };
366EOF
367}
368
237# Create an ITS file for the U-boot FIT, for use when 369# Create an ITS file for the U-boot FIT, for use when
238# we want to sign it so that the SPL can verify it 370# we want to sign it so that the SPL can verify it
239uboot_fitimage_assemble() { 371uboot_fitimage_assemble() {
372 conf_loadables="\"uboot\""
373 conf_firmware=""
240 rm -f ${UBOOT_ITS} ${UBOOT_FITIMAGE_BINARY} 374 rm -f ${UBOOT_ITS} ${UBOOT_FITIMAGE_BINARY}
241 375
242 # First we create the ITS script 376 # First we create the ITS script
@@ -289,13 +423,38 @@ EOF
289 423
290 cat << EOF >> ${UBOOT_ITS} 424 cat << EOF >> ${UBOOT_ITS}
291 }; 425 };
426EOF
427 if [ "${UBOOT_FIT_TEE}" = "1" ] ; then
428 conf_loadables="\"tee\", ${conf_loadables}"
429 uboot_fitimage_tee
430 fi
431
432 if [ "${UBOOT_FIT_ARM_TRUSTED_FIRMWARE}" = "1" ] ; then
433 conf_loadables="\"atf\", ${conf_loadables}"
434 uboot_fitimage_atf
435 fi
436
437 if [ -n "${UBOOT_FIT_USER_SETTINGS}" ] ; then
438 printf "%b" "${UBOOT_FIT_USER_SETTINGS}" >> ${UBOOT_ITS}
439 fi
440
441 if [ -n "${UBOOT_FIT_CONF_USER_LOADABLES}" ] ; then
442 conf_loadables="${conf_loadables}${UBOOT_FIT_CONF_USER_LOADABLES}"
443 fi
444
445 if [ -n "${UBOOT_FIT_CONF_FIRMWARE}" ] ; then
446 conf_firmware="firmware = \"${UBOOT_FIT_CONF_FIRMWARE}\";"
447 fi
448
449 cat << EOF >> ${UBOOT_ITS}
292 }; 450 };
293 451
294 configurations { 452 configurations {
295 default = "conf"; 453 default = "conf";
296 conf { 454 conf {
297 description = "Boot with signed U-Boot FIT"; 455 description = "Boot with signed U-Boot FIT";
298 loadables = "uboot"; 456 ${conf_firmware}
457 loadables = ${conf_loadables};
299 fdt = "fdt"; 458 fdt = "fdt";
300 }; 459 };
301 }; 460 };
@@ -311,25 +470,31 @@ EOF
311 ${UBOOT_FITIMAGE_BINARY} 470 ${UBOOT_FITIMAGE_BINARY}
312 471
313 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then 472 if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
314 # 473 if [ -n "${SPL_DTB_BINARY}" ] ; then
315 # Sign the U-boot FIT image and add public key to SPL dtb 474 #
316 # 475 # Sign the U-boot FIT image and add public key to SPL dtb
317 ${UBOOT_MKIMAGE_SIGN} \ 476 #
318 ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \ 477 ${UBOOT_MKIMAGE_SIGN} \
319 -F -k "${SPL_SIGN_KEYDIR}" \ 478 ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
320 -K "${SPL_DIR}/${SPL_DTB_BINARY}" \ 479 -F -k "${SPL_SIGN_KEYDIR}" \
321 -r ${UBOOT_FITIMAGE_BINARY} \ 480 -K "${SPL_DIR}/${SPL_DTB_BINARY}" \
322 ${SPL_MKIMAGE_SIGN_ARGS} 481 -r ${UBOOT_FITIMAGE_BINARY} \
323 # 482 ${SPL_MKIMAGE_SIGN_ARGS}
324 # Verify the U-boot FIT image and SPL dtb 483
325 # 484 # Verify the U-boot FIT image and SPL dtb
326 ${UBOOT_FIT_CHECK_SIGN} \ 485 ${UBOOT_FIT_CHECK_SIGN} \
327 -k "${SPL_DIR}/${SPL_DTB_BINARY}" \ 486 -k "${SPL_DIR}/${SPL_DTB_BINARY}" \
328 -f ${UBOOT_FITIMAGE_BINARY} 487 -f ${UBOOT_FITIMAGE_BINARY}
329 fi 488
330 489 cp ${SPL_DIR}/${SPL_DTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED}
331 if [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then 490 else
332 cp ${SPL_DIR}/${SPL_DTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED} 491 # Sign the U-boot FIT image
492 ${UBOOT_MKIMAGE_SIGN} \
493 ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
494 -F -k "${SPL_SIGN_KEYDIR}" \
495 -r ${UBOOT_FITIMAGE_BINARY} \
496 ${SPL_MKIMAGE_SIGN_ARGS}
497 fi
333 fi 498 fi
334} 499}
335 500
@@ -337,27 +502,24 @@ uboot_assemble_fitimage_helper() {
337 type="$1" 502 type="$1"
338 binary="$2" 503 binary="$2"
339 504
340 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then 505 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ -n "${UBOOT_DTB_BINARY}" ] ; then
341 concat_dtb $type $binary 506 concat_dtb "$type" "$binary"
342 fi 507 fi
343 508
344 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then 509 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ]; then
345 uboot_fitimage_assemble 510 uboot_fitimage_assemble
346 fi 511 fi
347 512
348 if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then 513 if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ -n "${SPL_DTB_BINARY}" ] ; then
349 concat_spl_dtb 514 concat_spl_dtb
350 fi 515 fi
351} 516}
352 517
353do_uboot_assemble_fitimage() { 518do_uboot_assemble_fitimage() {
354 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
355 cp "${STAGING_DIR_HOST}/sysroot-only/fitImage" "${B}/fitImage-linux"
356 fi
357
358 if [ -n "${UBOOT_CONFIG}" ]; then 519 if [ -n "${UBOOT_CONFIG}" ]; then
359 unset i j k 520 unset i
360 for config in ${UBOOT_MACHINE}; do 521 for config in ${UBOOT_MACHINE}; do
522 unset j k
361 i=$(expr $i + 1); 523 i=$(expr $i + 1);
362 for type in ${UBOOT_CONFIG}; do 524 for type in ${UBOOT_CONFIG}; do
363 j=$(expr $j + 1); 525 j=$(expr $j + 1);
@@ -387,11 +549,11 @@ addtask uboot_assemble_fitimage before do_install do_deploy after do_compile
387deploy_helper() { 549deploy_helper() {
388 type="$1" 550 type="$1"
389 551
390 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_SIGNED}" ] ; then 552 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ -n "${UBOOT_DTB_SIGNED}" ] ; then
391 deploy_dtb $type 553 deploy_dtb $type
392 fi 554 fi
393 555
394 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then 556 if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ]; then
395 if [ -n "${type}" ]; then 557 if [ -n "${type}" ]; then
396 uboot_its_image="u-boot-its-${type}-${PV}-${PR}" 558 uboot_its_image="u-boot-its-${type}-${PV}-${PR}"
397 uboot_fitimage_image="u-boot-fitImage-${type}-${PV}-${PR}" 559 uboot_fitimage_image="u-boot-fitImage-${type}-${PV}-${PR}"
@@ -409,7 +571,7 @@ deploy_helper() {
409 fi 571 fi
410 fi 572 fi
411 573
412 if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_SIGNED}" ] ; then 574 if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ -n "${SPL_DTB_BINARY}" ] ; then
413 deploy_spl_dtb $type 575 deploy_spl_dtb $type
414 fi 576 fi
415} 577}
@@ -434,7 +596,7 @@ do_deploy:prepend() {
434 deploy_helper "" 596 deploy_helper ""
435 fi 597 fi
436 598
437 if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then 599 if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ -n "${UBOOT_DTB_BINARY}" ] ; then
438 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY} 600 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
439 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK} 601 ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
440 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK} 602 ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
@@ -448,7 +610,7 @@ do_deploy:prepend() {
448 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK} 610 ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK}
449 fi 611 fi
450 612
451 if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then 613 if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ -n "${SPL_DTB_BINARY}" ] ; then
452 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK} 614 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK}
453 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY} 615 ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY}
454 ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK} 616 ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK}
diff --git a/meta/classes-recipe/uki.bbclass b/meta/classes-recipe/uki.bbclass
new file mode 100644
index 0000000000..fedff222c6
--- /dev/null
+++ b/meta/classes-recipe/uki.bbclass
@@ -0,0 +1,194 @@
1# Unified kernel image (UKI) class
2#
3# This bbclass merges kernel, initrd etc as a UKI standard UEFI binary,
4# to be loaded with UEFI firmware and systemd-boot on target HW.
5# TPM PCR pre-calculation is not supported since systemd-measure tooling
6# is meant to run on target, not in cross compile environment.
7#
8# See:
9# https://www.freedesktop.org/software/systemd/man/latest/ukify.html
10# https://uapi-group.org/specifications/specs/unified_kernel_image/
11#
12# The UKI contains:
13#
14# - UEFI stub
15# The Linux kernel can generate a UEFI stub; however, the one from systemd-boot can fetch
16# the command line from a separate section of the EFI application, avoiding the need to
17# rebuild the kernel.
18# - kernel
19# - initramfs
20# - kernel command line
21# - uname -r kernel version
22# - /etc/os-release to create a boot menu with version details
23# - optionally secure boot signature(s)
24# - other metadata (e.g. TPM PCR measurements)
25#
26# Usage instructions:
27#
28# - requires UEFI compatible firmware on target, e.g. qemuarm64-secureboot u-boot based
29# from meta-arm or qemux86 ovmf/edk2 based firmware for x86_64
30#
31# - Distro/build config:
32#
33# INIT_MANAGER = "systemd"
34# MACHINE_FEATURES:append = " efi"
35# EFI_PROVIDER = "systemd-boot"
36# INITRAMFS_IMAGE = "core-image-minimal-initramfs"
37#
38# - image recipe:
39#
40# inherit uki
41#
42# - qemuboot/runqemu changes in image recipe or build config:
43#
44# # Kernel command line must be inside the signed uki
45# QB_KERNEL_ROOT = ""
46# # kernel is in the uki image, not loaded separately
47# QB_DEFAULT_KERNEL = "none"
48#
49# - for UEFI secure boot, systemd-boot and uki (including kernel) can
50# be signed but require sbsign-tool-native (recipe available from meta-secure-core,
51# see also qemuarm64-secureboot from meta-arm). Set variable
52# UKI_SB_KEY to path of private key and UKI_SB_CERT for certificate.
53# Note that systemd-boot also needs to be signed with the same key.
54#
55# - at runtime, UEFI firmware will load and boot systemd-boot which
56# creates a menu from all detected uki binaries. No need to manually
57# set up boot menu entries.
58#
59# - see efi-uki-bootdisk.wks.in for how to create the ESP partition which hosts systemd-boot,
60# config file(s) for systemd-boot and the UKI binaries.
61#
62
63DEPENDS += "\
64 os-release \
65 systemd-boot \
66 systemd-boot-native \
67 virtual/cross-binutils \
68 virtual/kernel \
69"
70
71inherit image-artifact-names
72require ../conf/image-uefi.conf
73
74INITRAMFS_IMAGE ?= "core-image-minimal-initramfs"
75
76INITRD_ARCHIVE ?= "${INITRAMFS_IMAGE}-${MACHINE}.${INITRAMFS_FSTYPES}"
77
78do_image_complete[depends] += "${INITRAMFS_IMAGE}:do_image_complete"
79
80UKIFY_CMD ?= "ukify build"
81UKI_CONFIG_FILE ?= "${UNPACKDIR}/uki.conf"
82UKI_FILENAME ?= "uki.efi"
83UKI_KERNEL_FILENAME ?= "${KERNEL_IMAGETYPE}"
84UKI_CMDLINE ?= "rootwait root=LABEL=root"
85# secure boot keys and cert, needs sbsign-tools-native (meta-secure-core)
86#UKI_SB_KEY ?= ""
87#UKI_SB_CERT ?= ""
88
89IMAGE_EFI_BOOT_FILES ?= "${UKI_FILENAME};EFI/Linux/${UKI_FILENAME}"
90
91do_uki[depends] += " \
92 systemd-boot:do_deploy \
93 virtual/kernel:do_deploy \
94 "
95do_uki[depends] += "${@ '${INITRAMFS_IMAGE}:do_image_complete' if d.getVar('INITRAMFS_IMAGE') else ''}"
96
97# ensure that the build directory is empty every time we generate a newly-created uki
98do_uki[cleandirs] = "${B}"
99# influence the build directory at the start of the builds
100do_uki[dirs] = "${B}"
101
102# we want to allow specifying files in SRC_URI, such as for signing the UKI
103python () {
104 d.delVarFlag("do_fetch","noexec")
105 d.delVarFlag("do_unpack","noexec")
106}
107
108# main task
109python do_uki() {
110 import glob
111 import bb.process
112
113 # base ukify command, can be extended if needed
114 ukify_cmd = d.getVar('UKIFY_CMD')
115
116 deploy_dir_image = d.getVar('DEPLOY_DIR_IMAGE')
117
118 # architecture
119 target_arch = d.getVar('EFI_ARCH')
120 if target_arch:
121 ukify_cmd += " --efi-arch %s" % (target_arch)
122
123 # systemd stubs
124 stub = "%s/linux%s.efi.stub" % (d.getVar('DEPLOY_DIR_IMAGE'), target_arch)
125 if not os.path.exists(stub):
126 bb.fatal(f"ERROR: cannot find {stub}.")
127 ukify_cmd += " --stub %s" % (stub)
128
129 # initrd
130 initramfs_image = "%s" % (d.getVar('INITRD_ARCHIVE'))
131 ukify_cmd += " --initrd=%s" % (os.path.join(deploy_dir_image, initramfs_image))
132
133 # kernel
134 kernel_filename = d.getVar('UKI_KERNEL_FILENAME') or None
135 if kernel_filename:
136 kernel = "%s/%s" % (deploy_dir_image, kernel_filename)
137 if not os.path.exists(kernel):
138 bb.fatal(f"ERROR: cannot find %s" % (kernel))
139 ukify_cmd += " --linux=%s" % (kernel)
140 # not always needed, ukify can detect version from kernel binary
141 kernel_version = d.getVar('KERNEL_VERSION')
142 if kernel_version:
143 ukify_cmd += "--uname %s" % (kernel_version)
144 else:
145 bb.fatal("ERROR - UKI_KERNEL_FILENAME not set")
146
147 # command line
148 cmdline = d.getVar('UKI_CMDLINE')
149 if cmdline:
150 ukify_cmd += " --cmdline='%s'" % (cmdline)
151
152 # dtb
153 if d.getVar('KERNEL_DEVICETREE'):
154 for dtb in d.getVar('KERNEL_DEVICETREE').split():
155 dtb_path = "%s/%s" % (deploy_dir_image, dtb)
156 if not os.path.exists(dtb_path):
157 bb.fatal(f"ERROR: cannot find {dtb_path}.")
158 ukify_cmd += " --devicetree %s" % (dtb_path)
159
160 # custom config for ukify
161 if os.path.exists(d.getVar('UKI_CONFIG_FILE')):
162 ukify_cmd += " --config=%s" % (d.getVar('UKI_CONFIG_FILE'))
163
164 # systemd tools
165 ukify_cmd += " --tools=%s%s/lib/systemd/tools" % \
166 (d.getVar("RECIPE_SYSROOT_NATIVE"), d.getVar("prefix"))
167
168 # version
169 ukify_cmd += " --os-release=@%s%s/lib/os-release" % \
170 (d.getVar("RECIPE_SYSROOT"), d.getVar("prefix"))
171
172 # TODO: tpm2 measure for secure boot, depends on systemd-native and TPM tooling
173 # needed in systemd > 254 to fulfill ConditionSecurity=measured-uki
174 # Requires TPM device on build host, thus not supported at build time.
175 #ukify_cmd += " --measure"
176
177    # secure boot signing, also for kernel
178 key = d.getVar('UKI_SB_KEY')
179 if key:
180 ukify_cmd += " --sign-kernel --secureboot-private-key='%s'" % (key)
181 cert = d.getVar('UKI_SB_CERT')
182 if cert:
183 ukify_cmd += " --secureboot-certificate='%s'" % (cert)
184
185 # custom output UKI filename
186 output = " --output=%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('UKI_FILENAME'))
187 ukify_cmd += " %s" % (output)
188
189 # Run the ukify command
190 bb.debug(2, "uki: running command: %s" % (ukify_cmd))
191 out, err = bb.process.run(ukify_cmd, shell=True)
192 bb.debug(2, "%s\n%s" % (out, err))
193}
194addtask uki after do_rootfs before do_deploy do_image_complete do_image_wic
diff --git a/meta/classes-recipe/update-alternatives.bbclass b/meta/classes-recipe/update-alternatives.bbclass
index b153e1b297..5f40dc23ea 100644
--- a/meta/classes-recipe/update-alternatives.bbclass
+++ b/meta/classes-recipe/update-alternatives.bbclass
@@ -73,24 +73,6 @@ UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_
73 73
74PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native" 74PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
75 75
76def gen_updatealternativesvardeps(d):
77 pkgs = (d.getVar("PACKAGES") or "").split()
78 vars = (d.getVar("UPDALTVARS") or "").split()
79
80 # First compute them for non_pkg versions
81 for v in vars:
82 for flag in sorted((d.getVarFlags(v) or {}).keys()):
83 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
84 continue
85 d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
86
87 for p in pkgs:
88 for v in vars:
89 for flag in sorted((d.getVarFlags("%s:%s" % (v,p)) or {}).keys()):
90 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
91 continue
92 d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s:%s' % (v,p), flag, False)))
93
94def ua_extend_depends(d): 76def ua_extend_depends(d):
95 if not 'virtual/update-alternatives' in d.getVar('PROVIDES'): 77 if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
96 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives') 78 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
@@ -112,9 +94,6 @@ python __anonymous() {
112 if not update_alternatives_enabled(d): 94 if not update_alternatives_enabled(d):
113 return 95 return
114 96
115 # compute special vardeps
116 gen_updatealternativesvardeps(d)
117
118 # extend the depends to include virtual/update-alternatives 97 # extend the depends to include virtual/update-alternatives
119 ua_extend_depends(d) 98 ua_extend_depends(d)
120} 99}
@@ -124,13 +103,20 @@ def gen_updatealternativesvars(d):
124 pkgs = (d.getVar("PACKAGES") or "").split() 103 pkgs = (d.getVar("PACKAGES") or "").split()
125 vars = (d.getVar("UPDALTVARS") or "").split() 104 vars = (d.getVar("UPDALTVARS") or "").split()
126 105
106 # First compute them for non_pkg versions
127 for v in vars: 107 for v in vars:
128 ret.append(v + "_VARDEPS") 108 for flag in sorted((d.getVarFlags(v) or {}).keys()):
109 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
110 continue
111 ret.append(v + "[" + flag + "]")
129 112
130 for p in pkgs: 113 for p in pkgs:
131 for v in vars: 114 for v in vars:
132 ret.append(v + ":" + p) 115 for flag in sorted((d.getVarFlags("%s:%s" % (v,p)) or {}).keys()):
133 ret.append(v + "_VARDEPS_" + p) 116 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
117 continue
118 ret.append('%s:%s' % (v,p) + "[" + flag + "]")
119
134 return " ".join(ret) 120 return " ".join(ret)
135 121
136# Now the new stuff, we use a custom function to generate the right values 122# Now the new stuff, we use a custom function to generate the right values